scroll-reth

diff: +52451 -2653
ignored: +227 -80

This is an overview of the changes in scroll-reth, a fork of reth.

diff --git reth/crates/scroll/alloy/consensus/Cargo.toml scroll-reth/crates/scroll/alloy/consensus/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..00c175c6823b4eb53aa99d66906b4263534f5a1c --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/Cargo.toml @@ -0,0 +1,102 @@ +[package] +name = "scroll-alloy-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-rlp.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true +alloy-primitives = { workspace = true, features = ["rlp"] } + +# misc +derive_more = { workspace = true, features = ["display"] } +serde_with = { workspace = true, optional = true } + +# arbitrary +arbitrary = { workspace = true, features = ["derive"], optional = true } + +reth-codecs = { workspace = true, optional = true } +reth-codecs-derive = { workspace = true, optional = true } + +# required by reth-codecs +modular-bitfield = { workspace = true, optional = true } + +# serde +alloy-serde = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive"], optional = true } + +[dev-dependencies] +rand.workspace = true +bincode.workspace = true +serde_json.workspace = true +arbitrary = { workspace = true, features = ["derive"] } +alloy-primitives = { workspace = true, features = ["rand", "arbitrary"] } +reth-codecs = { workspace = true, features = ["test-utils"] } +proptest-arbitrary-interop.workspace = true +proptest.workspace = true + +[features] +default = ["std"] +std = [ + "serde/std", + "alloy-primitives/std", + "reth-codecs?/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-rlp/std", + "alloy-serde/std", + "proptest/std", + "rand/std", + "derive_more/std", + "serde_json/std", + "serde_with?/std", +] +k256 = [ + "alloy-primitives/k256", + "alloy-consensus/k256", +] +kzg = [ + "alloy-eips/kzg", + "alloy-consensus/kzg", + "std", +] +reth-codec = [ + "dep:reth-codecs", + "dep:reth-codecs-derive", + "modular-bitfield", + "std", +] +arbitrary = [ + "std", + "dep:arbitrary", + "alloy-primitives/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-serde/arbitrary", + "reth-codecs?/arbitrary", + "alloy-primitives/rand", +] +serde = [ + "dep:serde", + "dep:alloy-serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "rand/serde", + "reth-codecs?/serde", +] +serde-bincode-compat = [ + "dep:serde_with", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", +]
diff --git reth/crates/scroll/alloy/consensus/README.md scroll-reth/crates/scroll/alloy/consensus/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b311fb2629b4e1adfe3066efc064fd386dc8e1f5 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/README.md @@ -0,0 +1,15 @@ +# scroll-alloy-consensus + +Scroll consensus interface. + +This crate contains constants, types, and functions for implementing Scroll EL consensus and communication. This +includes an extended `ScrollTxEnvelope` type with l1 messages. + +In general a type belongs in this crate if it exists in the `alloy-consensus` crate, but was modified from the base Ethereum protocol in Scroll. +For consensus types that are not modified by Scroll, the `alloy-consensus` types should be used instead. + +## Provenance + +Much of this code was ported from [reth-primitives] as part of ongoing alloy migrations. + +[reth-primitives]: https://github.com/paradigmxyz/reth/tree/main/crates/primitives
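The central addition described in this README is `ScrollTxEnvelope`, which extends the EIP-2718 envelope with type-0x7e L1 message transactions. A minimal sketch of building, sealing, and round-tripping one through its EIP-2718 encoding, based on the tests added in this diff (field values are illustrative):

```rust
use alloy_consensus::Sealable;
use alloy_eips::eip2718::{Decodable2718, Encodable2718};
use alloy_primitives::{Address, Bytes, U256};
use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message};

fn main() {
    // L1 message transactions carry no signature; gas is already paid on L1.
    let tx = TxL1Message {
        queue_index: 1,
        gas_limit: 21_000,
        to: Address::left_padding_from(&[3]),
        sender: Address::left_padding_from(&[4]),
        value: U256::from(4_u64),
        input: Bytes::from(vec![5]),
    };

    // Seal (compute the transaction hash) and wrap it in the 0x7e envelope variant.
    let envelope = ScrollTxEnvelope::L1Message(tx.seal_slow());

    // EIP-2718 round trip: one type byte followed by the RLP payload.
    let encoded = envelope.encoded_2718();
    let decoded = ScrollTxEnvelope::decode_2718(&mut encoded.as_ref()).unwrap();
    assert_eq!(decoded, envelope);
}
```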
diff --git reth/crates/scroll/alloy/consensus/src/lib.rs scroll-reth/crates/scroll/alloy/consensus/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd55f2c39b36b05487ea5dbbf1fdab25aaebcc8d --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/lib.rs @@ -0,0 +1,30 @@ +#![doc = include_str!("../README.md")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/alloy-rs/core/main/assets/alloy.jpg", + html_favicon_url = "https://raw.githubusercontent.com/alloy-rs/core/main/assets/favicon.ico" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc as std; + +mod transaction; +pub use transaction::{ + ScrollAdditionalInfo, ScrollL1MessageTransactionFields, ScrollPooledTransaction, + ScrollTransaction, ScrollTransactionInfo, ScrollTxEnvelope, ScrollTxType, + ScrollTypedTransaction, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE, L1_MESSAGE_TX_TYPE_ID, +}; + +mod receipt; +pub use receipt::{ScrollReceiptEnvelope, ScrollReceiptWithBloom, ScrollTransactionReceipt}; + +#[cfg(feature = "serde")] +pub use transaction::serde_l1_message_tx_rpc; + +/// Bincode-compatible serde implementations. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::transaction::serde_bincode_compat::*; +}
diff --git reth/crates/scroll/alloy/consensus/src/receipt/envelope.rs scroll-reth/crates/scroll/alloy/consensus/src/receipt/envelope.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ba0afdd84d9962c5ae4325ac7c684010a869a6a --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/receipt/envelope.rs @@ -0,0 +1,373 @@ +//! Receipt envelope types for Scroll. + +use crate::ScrollTxType; +use std::vec::Vec; + +use alloy_consensus::{Eip658Value, Receipt, ReceiptWithBloom, TxReceipt}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + Typed2718, +}; +use alloy_primitives::{logs_bloom, Bloom, Log}; +use alloy_rlp::{length_of_length, BufMut, Decodable, Encodable}; + +/// Receipt envelope, as defined in [EIP-2718], modified for Scroll chains. +/// +/// This enum distinguishes between tagged and untagged legacy receipts, as the +/// in-protocol merkle tree may commit to EITHER 0-prefixed or raw. Therefore +/// we must ensure that encoding returns the precise byte-array that was +/// decoded, preserving the presence or absence of the `TransactionType` flag. +/// +/// Transaction receipt payloads are specified in their respective EIPs. +/// +/// [EIP-2718]: https://eips.ethereum.org/EIPS/eip-2718 +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(tag = "type"))] +#[non_exhaustive] +pub enum ScrollReceiptEnvelope<T = Log> { + /// Receipt envelope with no type flag. + #[cfg_attr(feature = "serde", serde(rename = "0x0", alias = "0x00"))] + Legacy(ReceiptWithBloom<Receipt<T>>), + /// Receipt envelope with type flag 1, containing a [EIP-2930] receipt. + /// + /// [EIP-2930]: https://eips.ethereum.org/EIPS/eip-2930 + #[cfg_attr(feature = "serde", serde(rename = "0x1", alias = "0x01"))] + Eip2930(ReceiptWithBloom<Receipt<T>>), + /// Receipt envelope with type flag 2, containing a [EIP-1559] receipt. + /// + /// [EIP-1559]: https://eips.ethereum.org/EIPS/eip-1559 + #[cfg_attr(feature = "serde", serde(rename = "0x2", alias = "0x02"))] + Eip1559(ReceiptWithBloom<Receipt<T>>), + /// Receipt envelope with type flag 4, containing a [EIP-7702] receipt. + /// + /// [EIP-7702]: https://eips.ethereum.org/EIPS/eip-7702 + #[cfg_attr(feature = "serde", serde(rename = "0x4", alias = "0x04"))] + Eip7702(ReceiptWithBloom<Receipt<T>>), + /// Receipt envelope with type flag 126, containing a [Scroll-L1-Message] receipt. + #[cfg_attr(feature = "serde", serde(rename = "0x7e", alias = "0x7E"))] + L1Message(ReceiptWithBloom<Receipt<T>>), +} + +impl ScrollReceiptEnvelope<Log> { + /// Creates a new [`ScrollReceiptEnvelope`] from the given parts. 
+ pub fn from_parts<'a>( + status: bool, + cumulative_gas_used: u64, + logs: impl IntoIterator<Item = &'a Log>, + tx_type: ScrollTxType, + ) -> Self { + let logs = logs.into_iter().cloned().collect::<Vec<_>>(); + let logs_bloom = logs_bloom(&logs); + let inner_receipt = + Receipt { status: Eip658Value::Eip658(status), cumulative_gas_used, logs }; + match tx_type { + ScrollTxType::Legacy => { + Self::Legacy(ReceiptWithBloom { receipt: inner_receipt, logs_bloom }) + } + ScrollTxType::Eip2930 => { + Self::Eip2930(ReceiptWithBloom { receipt: inner_receipt, logs_bloom }) + } + ScrollTxType::Eip1559 => { + Self::Eip1559(ReceiptWithBloom { receipt: inner_receipt, logs_bloom }) + } + ScrollTxType::Eip7702 => { + Self::Eip7702(ReceiptWithBloom { receipt: inner_receipt, logs_bloom }) + } + ScrollTxType::L1Message => { + Self::L1Message(ReceiptWithBloom { receipt: inner_receipt, logs_bloom }) + } + } + } +} + +impl<T> ScrollReceiptEnvelope<T> { + /// Return the [`ScrollTxType`] of the inner receipt. + pub const fn tx_type(&self) -> ScrollTxType { + match self { + Self::Legacy(_) => ScrollTxType::Legacy, + Self::Eip2930(_) => ScrollTxType::Eip2930, + Self::Eip1559(_) => ScrollTxType::Eip1559, + Self::Eip7702(_) => ScrollTxType::Eip7702, + Self::L1Message(_) => ScrollTxType::L1Message, + } + } + + /// Return true if the transaction was successful. + pub const fn is_success(&self) -> bool { + self.status() + } + + /// Returns the success status of the receipt's transaction. + pub const fn status(&self) -> bool { + self.as_receipt().unwrap().status.coerce_status() + } + + /// Returns the cumulative gas used at this receipt. + pub const fn cumulative_gas_used(&self) -> u64 { + self.as_receipt().unwrap().cumulative_gas_used + } + + /// Return the receipt logs. + pub fn logs(&self) -> &[T] { + &self.as_receipt().unwrap().logs + } + + /// Return the receipt's bloom. + pub const fn logs_bloom(&self) -> &Bloom { + match self { + Self::Legacy(t) | + Self::Eip2930(t) | + Self::Eip1559(t) | + Self::Eip7702(t) | + Self::L1Message(t) => &t.logs_bloom, + } + } + + /// Returns the L1 message receipt if it is a deposit receipt. + pub const fn as_l1_message_receipt_with_bloom(&self) -> Option<&ReceiptWithBloom<Receipt<T>>> { + match self { + Self::L1Message(t) => Some(t), + _ => None, + } + } + + /// Returns the L1 message receipt if it is a deposit receipt. + pub const fn as_l1_message_receipt(&self) -> Option<&Receipt<T>> { + match self { + Self::L1Message(t) => Some(&t.receipt), + _ => None, + } + } + + /// Return the inner receipt. Currently this is infallible, however, future + /// receipt types may be added. + pub const fn as_receipt(&self) -> Option<&Receipt<T>> { + match self { + Self::Legacy(t) | + Self::Eip2930(t) | + Self::Eip1559(t) | + Self::Eip7702(t) | + Self::L1Message(t) => Some(&t.receipt), + } + } +} + +impl ScrollReceiptEnvelope { + /// Get the length of the inner receipt in the 2718 encoding. + pub fn inner_length(&self) -> usize { + match self { + Self::Legacy(t) | + Self::Eip2930(t) | + Self::Eip1559(t) | + Self::Eip7702(t) | + Self::L1Message(t) => t.length(), + } + } + + /// Calculate the length of the rlp payload of the network encoded receipt. 
+ pub fn rlp_payload_length(&self) -> usize { + let length = self.inner_length(); + match self { + Self::Legacy(_) => length, + _ => length + 1, + } + } +} + +impl<T> TxReceipt for ScrollReceiptEnvelope<T> +where + T: Clone + core::fmt::Debug + PartialEq + Eq + Send + Sync, +{ + type Log = T; + + fn status_or_post_state(&self) -> Eip658Value { + self.as_receipt().unwrap().status + } + + fn status(&self) -> bool { + self.as_receipt().unwrap().status.coerce_status() + } + + /// Return the receipt's bloom. + fn bloom(&self) -> Bloom { + *self.logs_bloom() + } + + fn bloom_cheap(&self) -> Option<Bloom> { + Some(self.bloom()) + } + + /// Returns the cumulative gas used at this receipt. + fn cumulative_gas_used(&self) -> u64 { + self.as_receipt().unwrap().cumulative_gas_used + } + + /// Return the receipt logs. + fn logs(&self) -> &[T] { + &self.as_receipt().unwrap().logs + } +} + +impl Encodable for ScrollReceiptEnvelope { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.network_encode(out) + } + + fn length(&self) -> usize { + let mut payload_length = self.rlp_payload_length(); + if !self.is_legacy() { + payload_length += length_of_length(payload_length); + } + payload_length + } +} + +impl Decodable for ScrollReceiptEnvelope { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Self::network_decode(buf) + .map_or_else(|_| Err(alloy_rlp::Error::Custom("Unexpected type")), Ok) + } +} + +impl Encodable2718 for ScrollReceiptEnvelope { + fn type_flag(&self) -> Option<u8> { + match self { + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(ScrollTxType::Eip2930 as u8), + Self::Eip1559(_) => Some(ScrollTxType::Eip1559 as u8), + Self::Eip7702(_) => Some(ScrollTxType::Eip7702 as u8), + Self::L1Message(_) => Some(ScrollTxType::L1Message as u8), + } + } + + fn encode_2718_len(&self) -> usize { + self.inner_length() + !self.is_legacy() as usize + } + + fn encode_2718(&self, out: &mut dyn BufMut) { + match self.type_flag() { + None => {} + Some(ty) => out.put_u8(ty), + } + match self { + Self::Legacy(t) | + Self::Eip2930(t) | + Self::Eip1559(t) | + Self::Eip7702(t) | + Self::L1Message(t) => t.encode(out), + } + } +} + +impl Typed2718 for ScrollReceiptEnvelope { + fn ty(&self) -> u8 { + let ty = match self { + Self::Legacy(_) => ScrollTxType::Legacy, + Self::Eip2930(_) => ScrollTxType::Eip2930, + Self::Eip1559(_) => ScrollTxType::Eip1559, + Self::Eip7702(_) => ScrollTxType::Eip7702, + Self::L1Message(_) => ScrollTxType::L1Message, + }; + ty as u8 + } +} + +impl Decodable2718 for ScrollReceiptEnvelope { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { + ScrollTxType::Legacy => { + Err(alloy_rlp::Error::Custom("type-0 eip2718 transactions are not supported") + .into()) + } + ScrollTxType::Eip2930 => Ok(Self::Eip2930(Decodable::decode(buf)?)), + ScrollTxType::Eip1559 => Ok(Self::Eip1559(Decodable::decode(buf)?)), + ScrollTxType::Eip7702 => Ok(Self::Eip7702(Decodable::decode(buf)?)), + ScrollTxType::L1Message => Ok(Self::L1Message(Decodable::decode(buf)?)), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> { + Ok(Self::Legacy(Decodable::decode(buf)?)) + } +} + +#[cfg(all(test, feature = "arbitrary"))] +impl<'a, T> arbitrary::Arbitrary<'a> for ScrollReceiptEnvelope<T> +where + T: arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + match u.int_in_range(0..=4)? 
{ + 0 => Ok(Self::Legacy(ReceiptWithBloom::arbitrary(u)?)), + 1 => Ok(Self::Eip2930(ReceiptWithBloom::arbitrary(u)?)), + 2 => Ok(Self::Eip1559(ReceiptWithBloom::arbitrary(u)?)), + _ => Ok(Self::L1Message(ReceiptWithBloom::arbitrary(u)?)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{Receipt, ReceiptWithBloom}; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{address, b256, bytes, hex, Log, LogData}; + use alloy_rlp::Encodable; + + #[cfg(not(feature = "std"))] + use alloc::vec; + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn encode_legacy_receipt() { + let expected = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + let mut data = vec![]; + let receipt = ScrollReceiptEnvelope::Legacy(ReceiptWithBloom { + receipt: Receipt { + status: false.into(), + cumulative_gas_used: 0x1, + logs: vec![Log { + address: address!("0000000000000000000000000000000000000011"), + data: LogData::new_unchecked( + vec![ + b256!( + "000000000000000000000000000000000000000000000000000000000000dead" + ), + b256!( + "000000000000000000000000000000000000000000000000000000000000beef" + ), + ], + bytes!("0100ff"), + ), + }], + }, + logs_bloom: [0; 256].into(), + }); + + receipt.network_encode(&mut data); + + // check that the rlp length equals the length of the expected rlp + assert_eq!(receipt.length(), expected.len()); + assert_eq!(data, expected); + } + + #[test] + fn legacy_receipt_from_parts() { + let receipt = ScrollReceiptEnvelope::from_parts(true, 100, vec![], ScrollTxType::Legacy); + assert!(receipt.status()); + assert_eq!(receipt.cumulative_gas_used(), 100); + assert_eq!(receipt.logs().len(), 0); + assert_eq!(receipt.tx_type(), ScrollTxType::Legacy); + } + + #[test] + fn l1_message_receipt_from_parts() { + let receipt = ScrollReceiptEnvelope::from_parts(true, 100, vec![], ScrollTxType::L1Message); + assert!(receipt.status()); + assert_eq!(receipt.cumulative_gas_used(), 100); + assert_eq!(receipt.logs().len(), 0); + assert_eq!(receipt.tx_type(), ScrollTxType::L1Message); + } +}
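To illustrate the constructor added in this file, a receipt envelope can be built directly from execution results and then inspected by transaction type; this sketch mirrors the `from_parts` tests above:

```rust
use scroll_alloy_consensus::{ScrollReceiptEnvelope, ScrollTxType};

fn main() {
    // Successful L1 message receipt with 100 cumulative gas used and no logs.
    let receipt = ScrollReceiptEnvelope::from_parts(true, 100, vec![], ScrollTxType::L1Message);

    assert!(receipt.status());
    assert_eq!(receipt.cumulative_gas_used(), 100);
    assert_eq!(receipt.tx_type(), ScrollTxType::L1Message);
    // The L1 message accessor only returns `Some` for the 0x7e variant.
    assert!(receipt.as_l1_message_receipt().is_some());
}
```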
diff --git reth/crates/scroll/alloy/consensus/src/receipt/mod.rs scroll-reth/crates/scroll/alloy/consensus/src/receipt/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c06b4838314b7c82fa98a4e23231d6c0b00efc98 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/receipt/mod.rs @@ -0,0 +1,6 @@ +mod envelope; +pub use envelope::ScrollReceiptEnvelope; + +#[allow(clippy::module_inception)] +mod receipt; +pub use receipt::{ScrollReceiptWithBloom, ScrollTransactionReceipt};
diff --git reth/crates/scroll/alloy/consensus/src/receipt/receipt.rs scroll-reth/crates/scroll/alloy/consensus/src/receipt/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..5cdd1b3e1134d5a0d3dc6f9e6888eb7d2a441814 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/receipt/receipt.rs @@ -0,0 +1,256 @@ +//! Transaction receipt types for Scroll. + +use alloy_consensus::{ + Eip658Value, Receipt, ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, +}; +use alloy_primitives::{Bloom, Log, U256}; +use alloy_rlp::{Buf, BufMut, Decodable, Encodable, Header}; + +/// Receipt containing result of transaction execution. +#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct ScrollTransactionReceipt<T = Log> { + /// The inner receipt type. + #[cfg_attr(feature = "serde", serde(flatten))] + pub inner: Receipt<T>, + /// L1 fee for Scroll transactions. + pub l1_fee: U256, +} + +impl<T> ScrollTransactionReceipt<T> { + /// Returns a new [`ScrollTransactionReceipt`] from the inner receipt and the l1 fee. + pub const fn new(inner: Receipt<T>, l1_fee: U256) -> Self { + Self { inner, l1_fee } + } +} + +impl ScrollTransactionReceipt { + /// Calculates [`Log`]'s bloom filter. This is slow operation and [`ScrollReceiptWithBloom`] + /// can be used to cache this value. + pub fn bloom_slow(&self) -> Bloom { + self.inner.logs.iter().collect() + } + + /// Calculates the bloom filter for the receipt and returns the [`ScrollReceiptWithBloom`] + /// container type. + pub fn with_bloom(self) -> ScrollReceiptWithBloom { + self.into() + } +} + +impl<T: Encodable> ScrollTransactionReceipt<T> { + /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. + /// Does not include the L1 fee field which is not part of the consensus encoding of a receipt. + /// <https://github.com/scroll-tech/go-ethereum/blob/9fff27e4f34fb5097100ed76ee725ce056267f4b/core/types/receipt.go#L96-L102> + pub fn rlp_encoded_fields_length_with_bloom(&self, bloom: &Bloom) -> usize { + self.inner.rlp_encoded_fields_length_with_bloom(bloom) + } + + /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. + /// Does not include the L1 fee field which is not part of the consensus encoding of a receipt. + /// <https://github.com/scroll-tech/go-ethereum/blob/9fff27e4f34fb5097100ed76ee725ce056267f4b/core/types/receipt.go#L96-L102> + pub fn rlp_encode_fields_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + self.inner.rlp_encode_fields_with_bloom(bloom, out); + } + + /// Returns RLP header for this receipt encoding with the given [`Bloom`]. + /// Does not include the L1 fee field which is not part of the consensus encoding of a receipt. + /// <https://github.com/scroll-tech/go-ethereum/blob/9fff27e4f34fb5097100ed76ee725ce056267f4b/core/types/receipt.go#L96-L102> + pub fn rlp_header_with_bloom(&self, bloom: &Bloom) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length_with_bloom(bloom) } + } +} + +impl<T: Decodable> ScrollTransactionReceipt<T> { + /// RLP-decodes receipt's field with a [`Bloom`]. + /// + /// Does not expect an RLP header. 
+ pub fn rlp_decode_fields_with_bloom( + buf: &mut &[u8], + ) -> alloy_rlp::Result<ReceiptWithBloom<Self>> { + let ReceiptWithBloom { receipt: inner, logs_bloom } = + Receipt::rlp_decode_fields_with_bloom(buf)?; + + Ok(ReceiptWithBloom { logs_bloom, receipt: Self { inner, l1_fee: Default::default() } }) + } +} + +impl<T> AsRef<Receipt<T>> for ScrollTransactionReceipt<T> { + fn as_ref(&self) -> &Receipt<T> { + &self.inner + } +} + +impl<T> TxReceipt for ScrollTransactionReceipt<T> +where + T: AsRef<Log> + Clone + core::fmt::Debug + PartialEq + Eq + Send + Sync, +{ + type Log = T; + + fn status_or_post_state(&self) -> Eip658Value { + self.inner.status_or_post_state() + } + + fn status(&self) -> bool { + self.inner.status() + } + + fn bloom(&self) -> Bloom { + self.inner.bloom_slow() + } + + fn cumulative_gas_used(&self) -> u64 { + self.inner.cumulative_gas_used() + } + + fn logs(&self) -> &[Self::Log] { + self.inner.logs() + } +} + +impl<T: Encodable> RlpEncodableReceipt for ScrollTransactionReceipt<T> { + fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + self.rlp_header_with_bloom(bloom).length_with_payload() + } + + fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + self.rlp_header_with_bloom(bloom).encode(out); + self.rlp_encode_fields_with_bloom(bloom, out); + } +} + +impl<T: Decodable> RlpDecodableReceipt for ScrollTransactionReceipt<T> { + fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result<ReceiptWithBloom<Self>> { + let header = Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + + if buf.len() < header.payload_length { + return Err(alloy_rlp::Error::InputTooShort); + } + + // Note: we pass a separate buffer to `rlp_decode_fields_with_bloom` to allow it decode + // optional fields based on the remaining length. + let mut fields_buf = &buf[..header.payload_length]; + let this = Self::rlp_decode_fields_with_bloom(&mut fields_buf)?; + + if !fields_buf.is_empty() { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + buf.advance(header.payload_length); + + Ok(this) + } +} + +/// [`ScrollTransactionReceipt`] with calculated bloom filter, modified for Scroll. +/// +/// This convenience type allows us to lazily calculate the bloom filter for a +/// receipt, similar to [`Sealed`]. 
+/// +/// [`Sealed`]: alloy_consensus::Sealed +pub type ScrollReceiptWithBloom<T = Log> = ReceiptWithBloom<ScrollTransactionReceipt<T>>; + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, T> arbitrary::Arbitrary<'a> for ScrollTransactionReceipt<T> +where + T: arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + #[cfg(not(feature = "std"))] + use alloc::vec::Vec; + Ok(Self { + inner: Receipt { + status: Eip658Value::arbitrary(u)?, + cumulative_gas_used: u64::arbitrary(u)?, + logs: Vec::<T>::arbitrary(u)?, + }, + l1_fee: U256::arbitrary(u)?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Receipt; + use alloy_primitives::{address, b256, bytes, hex, Bytes, Log, LogData}; + use alloy_rlp::{Decodable, Encodable}; + + #[cfg(not(feature = "std"))] + use alloc::{vec, vec::Vec}; + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn decode_legacy_receipt() { + let data = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + // EIP658Receipt + let expected = + ScrollReceiptWithBloom { + receipt: ScrollTransactionReceipt { + inner: Receipt { + status: false.into(), + cumulative_gas_used: 0x1, + logs: vec![Log { + address: address!("0000000000000000000000000000000000000011"), + data: LogData::new_unchecked( + vec![ + b256!("000000000000000000000000000000000000000000000000000000000000dead"), + b256!("000000000000000000000000000000000000000000000000000000000000beef"), + ], + bytes!("0100ff"), + ), + }], + }, + l1_fee: U256::ZERO + }, + logs_bloom: [0; 256].into(), + }; + + let receipt = ScrollReceiptWithBloom::decode(&mut &data[..]).unwrap(); + assert_eq!(receipt, expected); + } + + #[test] + fn gigantic_receipt() { + let receipt = ScrollTransactionReceipt { + inner: Receipt { + cumulative_gas_used: 16747627, + status: true.into(), + logs: vec![ + Log { + address: address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), + data: LogData::new_unchecked( + vec![b256!( + "c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9" + )], + Bytes::from(vec![1; 0xffffff]), + ), + }, + Log { + address: address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), + data: LogData::new_unchecked( + vec![b256!( + "8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2" + )], + Bytes::from(vec![1; 0xffffff]), + ), + }, + ], + }, + l1_fee: U256::ZERO, + } + .with_bloom(); + + let mut data = vec![]; + + receipt.encode(&mut data); + let decoded = ScrollReceiptWithBloom::decode(&mut &data[..]).unwrap(); + + assert_eq!(decoded, receipt); + } +}
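The receipt type above wraps the standard alloy `Receipt` and adds the Scroll `l1_fee`, which is excluded from the consensus RLP encoding. A short sketch of constructing one and caching its bloom filter (the fee value is illustrative):

```rust
use alloy_consensus::Receipt;
use alloy_primitives::U256;
use scroll_alloy_consensus::ScrollTransactionReceipt;

fn main() {
    // Wrap a plain alloy receipt and attach the Scroll-specific L1 fee.
    let inner = Receipt { status: true.into(), cumulative_gas_used: 21_000, logs: vec![] };
    let receipt: ScrollTransactionReceipt =
        ScrollTransactionReceipt::new(inner, U256::from(1_000_u64));

    // `with_bloom` computes the logs bloom once and caches it in a `ScrollReceiptWithBloom`.
    let with_bloom = receipt.with_bloom();
    assert_eq!(with_bloom.receipt.l1_fee, U256::from(1_000_u64));
}
```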
diff --git reth/crates/scroll/alloy/consensus/src/transaction/envelope.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/envelope.rs new file mode 100644 index 0000000000000000000000000000000000000000..35e8e3265a36f5a849256d6f39663d4b6b518bf1 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/envelope.rs @@ -0,0 +1,973 @@ +use crate::{ScrollPooledTransaction, ScrollTxType, ScrollTypedTransaction, TxL1Message}; +use core::hash::Hash; + +use alloy_consensus::{ + error::ValueError, + transaction::{RlpEcdsaDecodableTx, TxHashRef}, + Sealable, Sealed, Signed, Transaction, TxEip1559, TxEip2930, TxEip7702, TxLegacy, Typed2718, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{Address, Bytes, Signature, TxHash, TxKind, B256, U256}; +use alloy_rlp::{Decodable, Encodable}; +#[cfg(feature = "reth-codec")] +use reth_codecs::{ + Compact, + __private::bytes::BufMut, + alloy::transaction::{CompactEnvelope, Envelope, FromTxCompact, ToTxCompact}, +}; + +/// The Ethereum [EIP-2718] Transaction Envelope, modified for Scroll chains. +/// +/// # Note: +/// +/// This enum distinguishes between tagged and untagged legacy transactions, as +/// the in-protocol merkle tree may commit to EITHER 0-prefixed or raw. +/// Therefore we must ensure that encoding returns the precise byte-array that +/// was decoded, preserving the presence or absence of the `TransactionType` +/// flag. +/// +/// [EIP-2718]: https://eips.ethereum.org/EIPS/eip-2718 +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "serde", + serde(into = "serde_from::TaggedTxEnvelope", from = "serde_from::MaybeTaggedTxEnvelope") +)] +#[cfg_attr(all(any(test, feature = "arbitrary"), feature = "k256"), derive(arbitrary::Arbitrary))] +pub enum ScrollTxEnvelope { + /// An untagged [`TxLegacy`]. + Legacy(Signed<TxLegacy>), + /// A [`TxEip2930`] tagged with type 1. + Eip2930(Signed<TxEip2930>), + /// A [`TxEip1559`] tagged with type 2. + Eip1559(Signed<TxEip1559>), + /// EIP-7702 transaction + Eip7702(Signed<TxEip7702>), + /// A [`TxL1Message`] tagged with type 0x7E. 
+ L1Message(Sealed<TxL1Message>), +} + +impl From<Signed<TxLegacy>> for ScrollTxEnvelope { + fn from(v: Signed<TxLegacy>) -> Self { + Self::Legacy(v) + } +} + +impl From<Signed<TxEip2930>> for ScrollTxEnvelope { + fn from(v: Signed<TxEip2930>) -> Self { + Self::Eip2930(v) + } +} + +impl From<Signed<TxEip1559>> for ScrollTxEnvelope { + fn from(v: Signed<TxEip1559>) -> Self { + Self::Eip1559(v) + } +} + +impl From<Signed<TxEip7702>> for ScrollTxEnvelope { + fn from(v: Signed<TxEip7702>) -> Self { + Self::Eip7702(v) + } +} + +impl From<TxL1Message> for ScrollTxEnvelope { + fn from(v: TxL1Message) -> Self { + v.seal_slow().into() + } +} + +impl From<Sealed<TxL1Message>> for ScrollTxEnvelope { + fn from(v: Sealed<TxL1Message>) -> Self { + Self::L1Message(v) + } +} + +impl From<Signed<ScrollTypedTransaction>> for ScrollTxEnvelope { + fn from(value: Signed<ScrollTypedTransaction>) -> Self { + let (tx, sig, hash) = value.into_parts(); + match tx { + ScrollTypedTransaction::Legacy(tx_legacy) => { + let tx = Signed::new_unchecked(tx_legacy, sig, hash); + Self::Legacy(tx) + } + ScrollTypedTransaction::Eip2930(tx_eip2930) => { + let tx = Signed::new_unchecked(tx_eip2930, sig, hash); + Self::Eip2930(tx) + } + ScrollTypedTransaction::Eip1559(tx_eip1559) => { + let tx = Signed::new_unchecked(tx_eip1559, sig, hash); + Self::Eip1559(tx) + } + ScrollTypedTransaction::Eip7702(tx_eip7702) => { + let tx = Signed::new_unchecked(tx_eip7702, sig, hash); + Self::Eip7702(tx) + } + ScrollTypedTransaction::L1Message(tx) => { + Self::L1Message(Sealed::new_unchecked(tx, hash)) + } + } + } +} + +impl Typed2718 for ScrollTxEnvelope { + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + Self::L1Message(tx) => tx.ty(), + } + } +} + +impl Transaction for ScrollTxEnvelope { + fn chain_id(&self) -> Option<u64> { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + Self::L1Message(tx) => tx.chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), + Self::L1Message(tx) => tx.nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + Self::L1Message(tx) => tx.gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + Self::L1Message(tx) => tx.gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + Self::L1Message(tx) => tx.max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) 
=> tx.tx().max_priority_fee_per_gas(), + Self::L1Message(tx) => tx.max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + Self::L1Message(tx) => tx.max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + Self::L1Message(tx) => tx.priority_fee_or_price(), + } + } + + fn to(&self) -> Option<Address> { + match self { + Self::Legacy(tx) => tx.tx().to(), + Self::Eip2930(tx) => tx.tx().to(), + Self::Eip1559(tx) => tx.tx().to(), + Self::Eip7702(tx) => tx.tx().to(), + Self::L1Message(tx) => tx.to(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + Self::L1Message(tx) => tx.kind(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + Self::L1Message(tx) => tx.value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + Self::L1Message(tx) => tx.input(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.tx().access_list(), + Self::Eip2930(tx) => tx.tx().access_list(), + Self::Eip1559(tx) => tx.tx().access_list(), + Self::Eip7702(tx) => tx.tx().access_list(), + Self::L1Message(tx) => tx.access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(), + Self::L1Message(tx) => tx.blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.tx().authorization_list(), + Self::Eip2930(tx) => tx.tx().authorization_list(), + Self::Eip1559(tx) => tx.tx().authorization_list(), + Self::Eip7702(tx) => tx.tx().authorization_list(), + Self::L1Message(tx) => tx.authorization_list(), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + Self::L1Message(tx) => tx.is_dynamic_fee(), + } + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + Self::L1Message(tx) => tx.effective_gas_price(base_fee), + } + } + + fn is_create(&self) -> bool { + match self { + 
Self::Legacy(tx) => tx.tx().is_create(), + Self::Eip2930(tx) => tx.tx().is_create(), + Self::Eip1559(tx) => tx.tx().is_create(), + Self::Eip7702(tx) => tx.tx().is_create(), + Self::L1Message(tx) => tx.is_create(), + } + } +} + +impl ScrollTxEnvelope { + /// Returns true if the transaction is a legacy transaction. + #[inline] + pub const fn is_legacy(&self) -> bool { + matches!(self, Self::Legacy(_)) + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + pub const fn is_eip2930(&self) -> bool { + matches!(self, Self::Eip2930(_)) + } + + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + pub const fn is_eip1559(&self) -> bool { + matches!(self, Self::Eip1559(_)) + } + + /// Returns true if the transaction is an EIP-7702 transaction. + #[inline] + pub const fn is_eip7702(&self) -> bool { + matches!(self, Self::Eip7702(_)) + } + + /// Returns true if the transaction is a deposit transaction. + #[inline] + pub const fn is_l1_message(&self) -> bool { + matches!(self, Self::L1Message(_)) + } + + /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. + pub const fn as_legacy(&self) -> Option<&Signed<TxLegacy>> { + match self { + Self::Legacy(tx) => Some(tx), + _ => None, + } + } + + /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. + pub const fn as_eip2930(&self) -> Option<&Signed<TxEip2930>> { + match self { + Self::Eip2930(tx) => Some(tx), + _ => None, + } + } + + /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. + pub const fn as_eip1559(&self) -> Option<&Signed<TxEip1559>> { + match self { + Self::Eip1559(tx) => Some(tx), + _ => None, + } + } + + /// Returns the [`TxEip7702`] variant if the transaction is an EIP-1559 transaction. + pub const fn as_eip7702(&self) -> Option<&Signed<TxEip7702>> { + match self { + Self::Eip7702(tx) => Some(tx), + _ => None, + } + } + + /// Returns the [`TxL1Message`] variant if the transaction is a deposit transaction. + pub const fn as_l1_message(&self) -> Option<&Sealed<TxL1Message>> { + match self { + Self::L1Message(tx) => Some(tx), + _ => None, + } + } + + /// Return the [`ScrollTxType`] of the inner txn. + pub const fn tx_type(&self) -> ScrollTxType { + match self { + Self::Legacy(_) => ScrollTxType::Legacy, + Self::Eip2930(_) => ScrollTxType::Eip2930, + Self::Eip1559(_) => ScrollTxType::Eip1559, + Self::Eip7702(_) => ScrollTxType::Eip7702, + Self::L1Message(_) => ScrollTxType::L1Message, + } + } + + /// Return the length of the inner txn, including type byte length + pub fn eip2718_encoded_length(&self) -> usize { + match self { + Self::Legacy(t) => t.eip2718_encoded_length(), + Self::Eip2930(t) => t.eip2718_encoded_length(), + Self::Eip1559(t) => t.eip2718_encoded_length(), + Self::Eip7702(t) => t.eip2718_encoded_length(), + Self::L1Message(t) => t.eip2718_encoded_length(), + } + } + + /// Returns the signature for the transaction. + pub const fn signature(&self) -> Option<Signature> { + match self { + Self::Legacy(t) => Some(*t.signature()), + Self::Eip2930(t) => Some(*t.signature()), + Self::Eip1559(t) => Some(*t.signature()), + Self::Eip7702(t) => Some(*t.signature()), + Self::L1Message(_) => None, + } + } + + /// Converts the [`ScrollTxEnvelope`] into a [`ScrollPooledTransaction`], returns an error if + /// the transaction is a L1 message. 
+ pub fn try_into_pooled(self) -> Result<ScrollPooledTransaction, ValueError<Self>> { + match self { + Self::Legacy(tx) => Ok(tx.into()), + Self::Eip2930(tx) => Ok(tx.into()), + Self::Eip1559(tx) => Ok(tx.into()), + Self::Eip7702(tx) => Ok(tx.into()), + Self::L1Message(tx) => Err(ValueError::new(tx.into(), "L1 messages cannot be pooled")), + } + } +} + +/// A Scroll chain transaction. +pub trait ScrollTransaction { + /// Returns true if the transaction is a L1 message. + fn is_l1_message(&self) -> bool; + /// Returns the queue index if the transaction is a L1 message, None otherwise. + fn queue_index(&self) -> Option<u64>; +} + +impl ScrollTransaction for ScrollTxEnvelope { + fn is_l1_message(&self) -> bool { + match self { + Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => false, + Self::L1Message(_) => true, + } + } + + fn queue_index(&self) -> Option<u64> { + match self { + Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => None, + Self::L1Message(tx) => Some(tx.queue_index), + } + } +} + +#[cfg(feature = "reth-codec")] +impl ToTxCompact for ScrollTxEnvelope { + fn to_tx_compact(&self, buf: &mut (impl BufMut + AsMut<[u8]>)) { + match self { + Self::Legacy(tx) => tx.tx().to_compact(buf), + Self::Eip2930(tx) => tx.tx().to_compact(buf), + Self::Eip1559(tx) => tx.tx().to_compact(buf), + Self::Eip7702(tx) => tx.tx().to_compact(buf), + Self::L1Message(tx) => tx.to_compact(buf), + }; + } +} + +#[cfg(feature = "reth-codec")] +impl FromTxCompact for ScrollTxEnvelope { + type TxType = ScrollTxType; + + fn from_tx_compact(buf: &[u8], tx_type: ScrollTxType, signature: Signature) -> (Self, &[u8]) { + match tx_type { + ScrollTxType::Legacy => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Legacy(tx), buf) + } + ScrollTxType::Eip2930 => { + let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip2930(tx), buf) + } + ScrollTxType::Eip1559 => { + let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip1559(tx), buf) + } + ScrollTxType::Eip7702 => { + let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); + let tx = Signed::new_unhashed(tx, signature); + (Self::Eip7702(tx), buf) + } + ScrollTxType::L1Message => { + let (tx, buf) = TxL1Message::from_compact(buf, buf.len()); + let tx = Sealed::new(tx); + (Self::L1Message(tx), buf) + } + } + } +} + +impl TxHashRef for ScrollTxEnvelope { + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::L1Message(tx) => tx.hash_ref(), + } + } +} + +#[cfg(feature = "reth-codec")] +const L1_MESSAGE_SIGNATURE: Signature = Signature::new(U256::ZERO, U256::ZERO, false); + +#[cfg(feature = "reth-codec")] +impl Envelope for ScrollTxEnvelope { + fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::L1Message(_) => &L1_MESSAGE_SIGNATURE, + } + } + + fn tx_type(&self) -> Self::TxType { + Self::tx_type(self) + } +} + +#[cfg(feature = "reth-codec")] +impl Compact for ScrollTxEnvelope { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: BufMut + AsMut<[u8]>, + { + CompactEnvelope::to_compact(self, buf) + } + + fn from_compact(buf: &[u8], 
len: usize) -> (Self, &[u8]) { + CompactEnvelope::from_compact(buf, len) + } +} + +impl Encodable for ScrollTxEnvelope { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.network_encode(out) + } + + fn length(&self) -> usize { + self.network_len() + } +} + +impl Decodable for ScrollTxEnvelope { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Ok(Self::network_decode(buf)?) + } +} + +impl Decodable2718 for ScrollTxEnvelope { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { + ScrollTxType::Eip2930 => Ok(Self::Eip2930(TxEip2930::rlp_decode_signed(buf)?)), + ScrollTxType::Eip1559 => Ok(Self::Eip1559(TxEip1559::rlp_decode_signed(buf)?)), + ScrollTxType::Eip7702 => Ok(Self::Eip7702(TxEip7702::rlp_decode_signed(buf)?)), + ScrollTxType::L1Message => Ok(Self::L1Message(TxL1Message::decode(buf)?.seal_slow())), + ScrollTxType::Legacy => { + Err(alloy_rlp::Error::Custom("type-0 eip2718 transactions are not supported") + .into()) + } + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> { + Ok(Self::Legacy(TxLegacy::rlp_decode_signed(buf)?)) + } +} + +impl Encodable2718 for ScrollTxEnvelope { + fn type_flag(&self) -> Option<u8> { + match self { + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(ScrollTxType::Eip2930 as u8), + Self::Eip1559(_) => Some(ScrollTxType::Eip1559 as u8), + Self::Eip7702(_) => Some(ScrollTxType::Eip7702 as u8), + Self::L1Message(_) => Some(ScrollTxType::L1Message as u8), + } + } + + fn encode_2718_len(&self) -> usize { + self.eip2718_encoded_length() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + match self { + // Legacy transactions have no difference between network and 2718 + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => { + tx.eip2718_encode(out); + } + Self::Eip1559(tx) => { + tx.eip2718_encode(out); + } + Self::Eip7702(tx) => { + tx.eip2718_encode(out); + } + Self::L1Message(tx) => { + tx.eip2718_encode(out); + } + } + } + + fn trie_hash(&self) -> B256 { + match self { + Self::Legacy(tx) => *tx.hash(), + Self::Eip2930(tx) => *tx.hash(), + Self::Eip1559(tx) => *tx.hash(), + Self::Eip7702(tx) => *tx.hash(), + Self::L1Message(tx) => tx.seal(), + } + } +} + +#[cfg(feature = "k256")] +impl alloy_consensus::transaction::SignerRecoverable for ScrollTxEnvelope { + fn recover_signer(&self) -> Result<Address, alloy_consensus::crypto::RecoveryError> { + let signature_hash = match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::L1Message(tx) => return Ok(tx.sender), + }; + let signature = self.signature().expect("handled L1 message in previous match"); + alloy_consensus::crypto::secp256k1::recover_signer(&signature, signature_hash) + } + + fn recover_signer_unchecked(&self) -> Result<Address, alloy_consensus::crypto::RecoveryError> { + let signature_hash = match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::L1Message(tx) => return Ok(tx.sender), + }; + let signature = self.signature().expect("handled L1 message in previous match"); + alloy_consensus::crypto::secp256k1::recover_signer_unchecked(&signature, signature_hash) + } +} + +#[cfg(feature = "serde")] +mod serde_from { + //! NB: Why do we need this? + //! + //! 
Because the tag may be missing, we need an abstraction over tagged (with + //! type) and untagged (always legacy). This is [`MaybeTaggedTxEnvelope`]. + //! + //! The tagged variant is [`TaggedTxEnvelope`], which always has a type tag. + //! + //! We serialize via [`TaggedTxEnvelope`] and deserialize via + //! [`MaybeTaggedTxEnvelope`]. + use super::*; + + #[derive(Debug, serde::Deserialize)] + #[serde(untagged)] + pub(crate) enum MaybeTaggedTxEnvelope { + Tagged(TaggedTxEnvelope), + #[serde(with = "alloy_consensus::transaction::signed_legacy_serde")] + Untagged(Signed<TxLegacy>), + } + + #[derive(Debug, serde::Serialize, serde::Deserialize)] + #[serde(tag = "type")] + pub(crate) enum TaggedTxEnvelope { + #[serde( + rename = "0x0", + alias = "0x00", + with = "alloy_consensus::transaction::signed_legacy_serde" + )] + Legacy(Signed<TxLegacy>), + #[serde(rename = "0x1", alias = "0x01")] + Eip2930(Signed<TxEip2930>), + #[serde(rename = "0x2", alias = "0x02")] + Eip1559(Signed<TxEip1559>), + #[serde(rename = "0x4", alias = "0x04")] + Eip7702(Signed<TxEip7702>), + #[serde( + rename = "0x7e", + alias = "0x7E", + serialize_with = "crate::serde_l1_message_tx_rpc" + )] + L1Message(Sealed<TxL1Message>), + } + + impl From<MaybeTaggedTxEnvelope> for ScrollTxEnvelope { + fn from(value: MaybeTaggedTxEnvelope) -> Self { + match value { + MaybeTaggedTxEnvelope::Tagged(tagged) => tagged.into(), + MaybeTaggedTxEnvelope::Untagged(tx) => Self::Legacy(tx), + } + } + } + + impl From<TaggedTxEnvelope> for ScrollTxEnvelope { + fn from(value: TaggedTxEnvelope) -> Self { + match value { + TaggedTxEnvelope::Legacy(signed) => Self::Legacy(signed), + TaggedTxEnvelope::Eip2930(signed) => Self::Eip2930(signed), + TaggedTxEnvelope::Eip1559(signed) => Self::Eip1559(signed), + TaggedTxEnvelope::Eip7702(signed) => Self::Eip7702(signed), + TaggedTxEnvelope::L1Message(tx) => Self::L1Message(tx), + } + } + } + + impl From<ScrollTxEnvelope> for TaggedTxEnvelope { + fn from(value: ScrollTxEnvelope) -> Self { + match value { + ScrollTxEnvelope::Legacy(signed) => Self::Legacy(signed), + ScrollTxEnvelope::Eip2930(signed) => Self::Eip2930(signed), + ScrollTxEnvelope::Eip1559(signed) => Self::Eip1559(signed), + ScrollTxEnvelope::Eip7702(signed) => Self::Eip7702(signed), + ScrollTxEnvelope::L1Message(tx) => Self::L1Message(tx), + } + } + } +} + +/// Bincode-compatible serde implementation for `ScrollTxEnvelope`. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + use crate::TxL1Message; + + use alloy_consensus::{ + transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, + Sealed, Signed, + }; + use alloy_primitives::{Signature, B256}; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible representation of an `ScrollTxEnvelope`. + #[derive(Debug, Serialize, Deserialize)] + pub enum ScrollTxEnvelope<'a> { + /// Legacy variant. + Legacy { + /// Transaction signature. + signature: Signature, + /// Borrowed legacy transaction data. + transaction: TxLegacy<'a>, + }, + /// EIP-2930 variant. + Eip2930 { + /// Transaction signature. + signature: Signature, + /// Borrowed EIP-2930 transaction data. + transaction: TxEip2930<'a>, + }, + /// EIP-1559 variant. + Eip1559 { + /// Transaction signature. + signature: Signature, + /// Borrowed EIP-1559 transaction data. + transaction: TxEip1559<'a>, + }, + /// EIP-7702 variant. + Eip7702 { + /// Transaction signature. 
+ signature: Signature, + /// Borrowed EIP-7702 transaction data. + transaction: TxEip7702<'a>, + }, + /// L1 message variant. + TxL1Message { + /// Precomputed hash. + hash: B256, + /// Borrowed deposit transaction data. + transaction: TxL1Message, + }, + } + + impl<'a> From<&'a super::ScrollTxEnvelope> for ScrollTxEnvelope<'a> { + fn from(value: &'a super::ScrollTxEnvelope) -> Self { + match value { + super::ScrollTxEnvelope::Legacy(signed_legacy) => Self::Legacy { + signature: *signed_legacy.signature(), + transaction: signed_legacy.tx().into(), + }, + super::ScrollTxEnvelope::Eip2930(signed_2930) => Self::Eip2930 { + signature: *signed_2930.signature(), + transaction: signed_2930.tx().into(), + }, + super::ScrollTxEnvelope::Eip1559(signed_1559) => Self::Eip1559 { + signature: *signed_1559.signature(), + transaction: signed_1559.tx().into(), + }, + super::ScrollTxEnvelope::Eip7702(signed_7702) => Self::Eip7702 { + signature: *signed_7702.signature(), + transaction: signed_7702.tx().into(), + }, + super::ScrollTxEnvelope::L1Message(sealed_l1_message) => Self::TxL1Message { + hash: sealed_l1_message.seal(), + transaction: sealed_l1_message.inner().clone(), + }, + } + } + } + + impl<'a> From<ScrollTxEnvelope<'a>> for super::ScrollTxEnvelope { + fn from(value: ScrollTxEnvelope<'a>) -> Self { + match value { + ScrollTxEnvelope::Legacy { signature, transaction } => { + Self::Legacy(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip2930 { signature, transaction } => { + Self::Eip2930(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip1559 { signature, transaction } => { + Self::Eip1559(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::Eip7702 { signature, transaction } => { + Self::Eip7702(Signed::new_unhashed(transaction.into(), signature)) + } + ScrollTxEnvelope::TxL1Message { hash, transaction } => { + Self::L1Message(Sealed::new_unchecked(transaction, hash)) + } + } + } + } + + impl SerializeAs<super::ScrollTxEnvelope> for ScrollTxEnvelope<'_> { + fn serialize_as<S>( + source: &super::ScrollTxEnvelope, + serializer: S, + ) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let borrowed = ScrollTxEnvelope::from(source); + borrowed.serialize(serializer) + } + } + + impl<'de> DeserializeAs<'de, super::ScrollTxEnvelope> for ScrollTxEnvelope<'de> { + fn deserialize_as<D>(deserializer: D) -> Result<super::ScrollTxEnvelope, D::Error> + where + D: Deserializer<'de>, + { + let borrowed = ScrollTxEnvelope::deserialize(deserializer)?; + Ok(borrowed.into()) + } + } + + #[cfg(test)] + mod tests { + use super::*; + use arbitrary::Arbitrary; + use rand::Rng; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + + /// Tests a bincode round-trip for `ScrollTxEnvelope` using an arbitrary instance. + #[test] + fn test_scroll_tx_envelope_bincode_roundtrip_arbitrary() { + #[serde_as] + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Data { + // Use the bincode-compatible representation defined in this module. 
+ #[serde_as(as = "ScrollTxEnvelope<'_>")] + envelope: super::super::ScrollTxEnvelope, + } + + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let data = Data { + envelope: super::super::ScrollTxEnvelope::arbitrary( + &mut arbitrary::Unstructured::new(&bytes), + ) + .unwrap(), + }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, data); + } + } +} + +#[cfg(test)] +mod tests { + extern crate alloc; + use super::*; + use alloc::vec; + use alloy_primitives::{hex, Address, Bytes, U256}; + + #[test] + fn test_tx_gas_limit() { + let tx = TxL1Message { gas_limit: 1, ..Default::default() }; + let tx_envelope = ScrollTxEnvelope::L1Message(tx.seal_slow()); + assert_eq!(tx_envelope.gas_limit(), 1); + } + + #[test] + fn test_encode_decode_l1_message() { + let tx = TxL1Message { + queue_index: 1, + gas_limit: 2, + to: Address::left_padding_from(&[3]), + sender: Address::left_padding_from(&[4]), + value: U256::from(4_u64), + input: Bytes::from(vec![5]), + }; + let tx_envelope = ScrollTxEnvelope::L1Message(tx.seal_slow()); + let encoded = tx_envelope.encoded_2718(); + let decoded = ScrollTxEnvelope::decode_2718(&mut encoded.as_ref()).unwrap(); + assert_eq!(encoded.len(), tx_envelope.encode_2718_len()); + assert_eq!(decoded, tx_envelope); + } + + #[test] + #[cfg(feature = "serde")] + fn test_serde_roundtrip_l1_message() { + let tx = TxL1Message { + queue_index: 11, + gas_limit: u64::MAX, + sender: Address::random(), + to: Address::random(), + value: U256::MAX, + input: Bytes::new(), + }; + let tx_envelope = ScrollTxEnvelope::L1Message(tx.seal_slow()); + + let serialized = serde_json::to_string(&tx_envelope).unwrap(); + let deserialized: ScrollTxEnvelope = serde_json::from_str(&serialized).unwrap(); + + assert_eq!(tx_envelope, deserialized); + } + + #[test] + fn eip2718_l1_message_decode() { + // <https://scrollscan.com/tx/0xace7103cc22a372c81cda04e15442a721cd3d5d64eda2e1578ba310d91597d97> + let b = hex!("7ef9015a830e7991831e848094781e90f1c8fc4611c9b7497c3b47f99ef6969cbc80b901248ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e799100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f8411b3f3d662006b9bf68884e71f1fc0f8ea04e4cb188354738202c3e34a473b93000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000947885bcbd5cecef1336b5300fb5186a12ddd8c478"); + + let tx = ScrollTxEnvelope::decode_2718(&mut b[..].as_ref()).unwrap(); + tx.as_l1_message().unwrap(); + } + + #[test] + fn eip1559_decode() { + use alloy_consensus::SignableTransaction; + use alloy_primitives::Signature; + let tx = TxEip1559 { + chain_id: 1u64, + nonce: 2, + max_fee_per_gas: 3, + max_priority_fee_per_gas: 4, + gas_limit: 5, + to: Address::left_padding_from(&[6]).into(), + value: U256::from(7_u64), + input: vec![8].into(), + access_list: Default::default(), + }; + let sig = Signature::test_signature(); + let tx_signed = tx.into_signed(sig); + let envelope: ScrollTxEnvelope = tx_signed.into(); + let encoded = envelope.encoded_2718(); + let mut slice = encoded.as_slice(); + let decoded = ScrollTxEnvelope::decode_2718(&mut slice).unwrap(); + assert!(matches!(decoded, 
ScrollTxEnvelope::Eip1559(_))); + } +}
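Beyond the standard `Transaction` impl, this file introduces the `ScrollTransaction` trait for the L1-message-specific accessors, and `try_into_pooled` rejects L1 messages since they never enter the transaction pool. A brief sketch of that behaviour (the queue index value is illustrative):

```rust
use scroll_alloy_consensus::{ScrollTransaction, ScrollTxEnvelope, TxL1Message};

fn main() {
    // `From<TxL1Message>` seals the message and wraps it in the envelope.
    let envelope: ScrollTxEnvelope =
        TxL1Message { queue_index: 7, ..Default::default() }.into();

    assert!(envelope.is_l1_message());
    assert_eq!(envelope.queue_index(), Some(7));

    // L1 messages cannot be converted into pooled transactions.
    assert!(envelope.try_into_pooled().is_err());
}
```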
diff --git reth/crates/scroll/alloy/consensus/src/transaction/l1_message.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/l1_message.rs new file mode 100644 index 0000000000000000000000000000000000000000..ec743db4f516929b5fe47d5a87c454455fb5b336 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/l1_message.rs @@ -0,0 +1,523 @@ +//! Scroll L1 message transaction + +use crate::ScrollTxType; +use std::vec::Vec; + +use alloy_consensus::{Sealable, Transaction, Typed2718}; +use alloy_eips::eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}; +use alloy_primitives::{ + keccak256, + private::alloy_rlp::{Encodable, Header}, + Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256, +}; +use alloy_rlp::Decodable; +#[cfg(any(test, feature = "reth-codec"))] +use {reth_codecs::Compact, reth_codecs_derive::add_arbitrary_tests}; + +/// L1 message transaction type id, 0x7e in hex. +pub const L1_MESSAGE_TRANSACTION_TYPE: u8 = 126; + +/// A message transaction sent from the settlement layer to the L2 for execution. +/// +/// The signature of the L1 message is already verified on the L1 and as such doesn't contain +/// a signature field. Gas for the transaction execution on Scroll is already paid for on the L1. +/// +/// # Bincode compatibility +/// +/// `bincode` crate doesn't work with optionally serializable serde fields and some of the execution +/// types require optional serialization for RPC compatibility. Since `TxL1Message` doesn't +/// contain optionally serializable fields, no `bincode` compatible bridge implementation is +/// required. +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "serde"), + serde(from = "msg_serde::L1MsgSerdeHelper", into = "msg_serde::L1MsgSerdeHelper") +)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), add_arbitrary_tests(compact, rlp))] +pub struct TxL1Message { + /// The queue index of the message in the L1 contract queue. + pub queue_index: u64, + /// The gas limit for the transaction. Gas is paid for when message is sent from the L1. + pub gas_limit: u64, + /// The destination for the transaction. `Address` is used in place of `TxKind` since contract + /// creations aren't allowed via L1 message transactions. + pub to: Address, + /// The value sent. + pub value: U256, + /// The L1 sender of the transaction. + pub sender: Address, + /// The input of the transaction. + pub input: Bytes, +} + +impl TxL1Message { + /// Returns an empty signature for the [`TxL1Message`], which don't include a signature. + pub const fn signature() -> Signature { + Signature::new(U256::ZERO, U256::ZERO, false) + } + + /// Decodes the inner [`TxL1Message`] fields from RLP bytes. + /// + /// NOTE: This assumes a RLP header has already been decoded, and _just_ decodes the following + /// RLP fields in the following order: + /// + /// - `queue_index` + /// - `gas_limit` + /// - `to` + /// - `value` + /// - `input` + /// - `sender` + pub fn rlp_decode_fields(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Ok(Self { + queue_index: Decodable::decode(buf)?, + gas_limit: Decodable::decode(buf)?, + to: Decodable::decode(buf)?, + value: Decodable::decode(buf)?, + input: Decodable::decode(buf)?, + sender: Decodable::decode(buf)?, + }) + } + + /// Decodes the transaction from RLP bytes. 
+ pub fn rlp_decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + let header = Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + let remaining = buf.len(); + + let this = Self::rlp_decode_fields(buf)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::ListLengthMismatch { + expected: header.payload_length, + got: remaining - buf.len(), + }); + } + + Ok(this) + } + + /// Outputs the length of the transaction's fields, without a RLP header. + pub fn rlp_encoded_fields_length(&self) -> usize { + self.queue_index.length() + + self.gas_limit.length() + + self.to.length() + + self.value.length() + + self.input.0.length() + + self.sender.length() + } + + /// Encode the fields of the transaction without a RLP header. + /// <https://github.com/scroll-tech/go-ethereum/blob/9fff27e4f34fb5097100ed76ee725ce056267f4b/core/types/l1_message_tx.go#L12-L19> + pub fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) { + self.queue_index.encode(out); + self.gas_limit.encode(out); + self.to.encode(out); + self.value.encode(out); + self.input.encode(out); + self.sender.encode(out); + } + + pub(crate) const fn tx_type(&self) -> u8 { + L1_MESSAGE_TRANSACTION_TYPE + } + + /// Create a RLP header for the transaction. + fn rlp_header(&self) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length() } + } + + /// RLP encodes the transaction. + pub fn rlp_encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.rlp_header().encode(out); + self.rlp_encode_fields(out); + } + + /// Get the length of the transaction when RLP encoded. + pub fn rlp_encoded_length(&self) -> usize { + self.rlp_header().length_with_payload() + } + + /// Get the length of the transaction when EIP-2718 encoded. This is the + /// 1 byte type flag + the length of the RLP encoded transaction. + pub fn eip2718_encoded_length(&self) -> usize { + self.rlp_encoded_length() + 1 + } + + /// EIP-2718 encode the transaction. + pub fn eip2718_encode(&self, out: &mut dyn alloy_rlp::BufMut) { + out.put_u8(L1_MESSAGE_TRANSACTION_TYPE); + self.rlp_encode(out) + } + + /// Calculates the in-memory size of the [`TxL1Message`] transaction. + #[inline] + pub fn size(&self) -> usize { + size_of::<u64>() + // queue_index + size_of::<u64>() + // gas_limit + size_of::<Address>() + // to + size_of::<U256>() + // value + self.input.len() + // input + size_of::<Address>() // sender + } + + /// Calculates the hash of the [`TxL1Message`] transaction. 
+ pub fn tx_hash(&self) -> TxHash { + let mut buf = Vec::with_capacity(self.eip2718_encoded_length()); + self.eip2718_encode(&mut buf); + keccak256(&buf) + } +} + +impl Typed2718 for TxL1Message { + fn ty(&self) -> u8 { + ScrollTxType::L1Message as u8 + } +} + +impl Encodable2718 for TxL1Message { + fn type_flag(&self) -> Option<u8> { + Some(self.tx_type()) + } + + fn encode_2718_len(&self) -> usize { + self.eip2718_encoded_length() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + out.put_u8(self.tx_type()); + self.rlp_encode(out); + } +} + +impl Decodable2718 for TxL1Message { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { + if ty != L1_MESSAGE_TRANSACTION_TYPE { + return Err(Eip2718Error::UnexpectedType(ty)); + } + let tx = Self::rlp_decode(buf)?; + Ok(tx) + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> { + let tx = Self::decode(buf)?; + Ok(tx) + } +} + +impl Encodable for TxL1Message { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.rlp_encode(out) + } + + fn length(&self) -> usize { + self.rlp_encoded_length() + } +} + +impl Decodable for TxL1Message { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Self::rlp_decode(buf) + } +} + +impl Transaction for TxL1Message { + fn chain_id(&self) -> Option<ChainId> { + None + } + + fn nonce(&self) -> u64 { + 0u64 + } + + fn gas_limit(&self) -> u64 { + self.gas_limit + } + + fn gas_price(&self) -> Option<u128> { + None + } + + fn max_fee_per_gas(&self) -> u128 { + 0 + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + None + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + None + } + + fn priority_fee_or_price(&self) -> u128 { + 0 + } + + fn effective_gas_price(&self, _base_fee: Option<u64>) -> u128 { + 0 + } + + fn is_dynamic_fee(&self) -> bool { + false + } + + fn kind(&self) -> TxKind { + TxKind::Call(self.to) + } + + fn is_create(&self) -> bool { + false + } + + fn value(&self) -> U256 { + self.value + } + + fn input(&self) -> &Bytes { + &self.input + } + + fn access_list(&self) -> Option<&alloy_eips::eip2930::AccessList> { + None + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + None + } + + fn authorization_list(&self) -> Option<&[alloy_eips::eip7702::SignedAuthorization]> { + None + } +} + +impl Sealable for TxL1Message { + fn hash_slow(&self) -> B256 { + self.tx_hash() + } +} + +#[cfg(any(test, feature = "serde"))] +mod msg_serde { + use super::*; + use serde::{Deserialize, Serialize}; + + /// Helper struct to serialize/deserialize the `TxL1Message` with a `nonce` field. 
+ #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] + #[serde(rename_all = "camelCase")] + pub struct L1MsgSerdeHelper { + #[serde(with = "alloy_serde::quantity")] + queue_index: u64, + #[serde(with = "alloy_serde::quantity", rename = "gas")] + gas_limit: u64, + to: Address, + value: U256, + sender: Address, + input: Bytes, + #[serde(default, with = "alloy_serde::quantity")] + nonce: u64, + } + + impl From<L1MsgSerdeHelper> for TxL1Message { + fn from(helper: L1MsgSerdeHelper) -> Self { + Self { + queue_index: helper.queue_index, + gas_limit: helper.gas_limit, + to: helper.to, + value: helper.value, + sender: helper.sender, + input: helper.input, + } + } + } + + impl From<TxL1Message> for L1MsgSerdeHelper { + fn from(helper: TxL1Message) -> Self { + Self { + queue_index: helper.queue_index, + gas_limit: helper.gas_limit, + to: helper.to, + value: helper.value, + sender: helper.sender, + input: helper.input, + nonce: 0, + } + } + } +} + +/// Scroll specific transaction fields +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "camelCase"))] +pub struct ScrollL1MessageTransactionFields { + /// The index of the transaction in the message queue. + #[cfg_attr(any(test, feature = "serde"), serde(with = "alloy_serde::quantity"))] + pub queue_index: u64, + /// The sender of the transaction on the L1. + pub sender: Address, +} + +/// L1 message transactions don't have a signature, however, we include an empty signature in the +/// response for better compatibility. +/// +/// This function can be used as `serialize_with` serde attribute for the [`TxL1Message`] and will +/// flatten [`TxL1Message::signature`] into response. +/// +/// <https://github.com/scroll-tech/go-ethereum/blob/develop/core/types/l1_message_tx.go#L51>. 
+#[cfg(feature = "serde")] +pub fn serde_l1_message_tx_rpc<T: serde::Serialize, S: serde::Serializer>( + value: &T, + serializer: S, +) -> Result<S::Ok, S::Error> { + use serde::Serialize; + + #[derive(Serialize)] + struct SerdeHelper<'a, T> { + #[serde(flatten)] + value: &'a T, + #[serde(flatten)] + signature: Signature, + } + + SerdeHelper { value, signature: TxL1Message::signature() }.serialize(serializer) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{address, bytes, hex, Bytes, U256}; + use alloy_rlp::BytesMut; + use arbitrary::Arbitrary; + use rand::Rng; + use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; + + #[test] + fn test_serde_roundtrip() { + let original = TxL1Message { + queue_index: 100, + gas_limit: 1234, + to: Address::random(), + value: U256::random(), + sender: Address::random(), + input: bytes!("deadbeef"), + }; + let json = serde_json::to_value(&original).expect("Failed to serialize"); + assert_eq!(json.get("nonce"), Some(&serde_json::Value::String("0x0".to_string()))); + + let deserialized: TxL1Message = + serde_json::from_value(json).expect("Failed to deserialize"); + assert_eq!(original, deserialized); + } + + #[test] + fn test_rlp_roundtrip() { + // <https://scrollscan.com/tx/0xace7103cc22a372c81cda04e15442a721cd3d5d64eda2e1578ba310d91597d97> + let bytes = Bytes::from_static(&hex!("7ef9015a830e7991831e848094781e90f1c8fc4611c9b7497c3b47f99ef6969cbc80b901248ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e799100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f8411b3f3d662006b9bf68884e71f1fc0f8ea04e4cb188354738202c3e34a473b93000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000947885bcbd5cecef1336b5300fb5186a12ddd8c478")); + let tx_a = TxL1Message::decode(&mut bytes[1..].as_ref()).unwrap(); + let mut buf_a = BytesMut::default(); + tx_a.encode(&mut buf_a); + assert_eq!(&buf_a[..], &bytes[1..]); + } + + #[test] + fn test_encode_decode_fields() { + let original = TxL1Message { + queue_index: 100, + gas_limit: 0, + to: Address::default(), + value: U256::default(), + sender: Address::default(), + input: Bytes::default(), + }; + let mut buffer = BytesMut::new(); + original.rlp_encode_fields(&mut buffer); + let decoded = TxL1Message::rlp_decode_fields(&mut &buffer[..]).expect("Failed to decode"); + + assert_eq!(original, decoded); + } + + #[test] + fn test_encode_with_and_without_header() { + let tx_deposit = TxL1Message { + queue_index: 0, + gas_limit: 50000, + to: Address::default(), + value: U256::default(), + sender: Address::default(), + input: Bytes::default(), + }; + + let mut buffer_with_header = BytesMut::new(); + tx_deposit.encode(&mut buffer_with_header); + + let mut buffer_without_header = BytesMut::new(); + tx_deposit.rlp_encode_fields(&mut buffer_without_header); + + assert!(buffer_with_header.len() > buffer_without_header.len()); + } + + #[test] + fn test_payload_length() { + let tx_deposit = TxL1Message { + queue_index: 0, + gas_limit: 50000, + to: Address::default(), + value: U256::default(), + sender: Address::default(), + input: Bytes::default(), + }; + + assert!(tx_deposit.size() > 
tx_deposit.rlp_encoded_fields_length()); + } + + #[test] + fn test_deserialize_hex_to_u64() { + let rpc_tx = r#"{"gas":"0x1e8480","input":"0x8ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e7ba000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f846ffc1507cbfe98a2b0ba1f06ea7e4eb749c001f78f6cb5540daa556a0566322a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","to":"0x781e90f1c8fc4611c9b7497c3b47f99ef6969cbc","value":"0x0","sender":"0x7885bcbd5cecef1336b5300fb5186a12ddd8c478","queueIndex":"0xe7ba0"}"#; + // let obj: TxL1Message = serde_json::from_str(rpc_tx).unwrap(); + let obj = serde_json::from_str::<TxL1Message>(rpc_tx).unwrap(); + assert_eq!(obj.queue_index, 0xe7ba0); + } + + #[test] + fn test_bincode_roundtrip() { + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let tx = TxL1Message::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + + let encoded = bincode::serialize(&tx).unwrap(); + let decoded: TxL1Message = bincode::deserialize(&encoded).unwrap(); + assert_eq!(decoded, tx); + } + + #[test] + fn test_eip2718_encode() { + let tx = + TxL1Message { + queue_index: 947883, + gas_limit: 2000000, + to: address!("781e90f1c8fc4611c9b7497c3b47f99ef6969cbc"), + value: U256::ZERO, + sender: address!("7885bcbd5cecef1336b5300fb5186a12ddd8c478"), + input: bytes!("8ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e76ab00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f84f464e58d4bfa93bcc57abfb14dbe1b8ff46cd132b5709aab227f269727943d2f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + } + ; + let bytes = Bytes::from_static(&hex!("7ef9015a830e76ab831e848094781e90f1c8fc4611c9b7497c3b47f99ef6969cbc80b901248ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e76ab00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f84f464e58d4bfa93bcc57abfb14dbe1b8ff46cd132b5709aab227f269727943d2f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000947885bcbd5cecef1336b5300fb5186a12ddd8c478")); + + let mut encoded = BytesMut::default(); + tx.encode_2718(&mut encoded); + + assert_eq!(encoded, bytes.as_ref()) + } + + #[test] + fn test_compaction_backwards_compatibility() { + assert_eq!(TxL1Message::bitflag_encoded_bytes(), 2); + validate_bitflag_backwards_compat!(TxL1Message, UnusedBits::NotZero); + } +}
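The tests above pin down the wire format of the new L1 message type. As a quick orientation (not part of the diff), here is a minimal usage sketch, assuming `TxL1Message` is re-exported at the crate root as `scroll_alloy_consensus::TxL1Message`; the field values are made up for illustration:

```rust
// Hedged sketch, not from the diff: exercises the encoding helpers added in
// l1_message.rs. The crate-root re-export path and field values are assumptions.
use alloy_primitives::{address, bytes, keccak256, U256};
use scroll_alloy_consensus::TxL1Message;

fn main() {
    let tx = TxL1Message {
        queue_index: 42,          // position in the L1 message queue
        gas_limit: 1_000_000,     // gas is already paid for on the L1
        to: address!("781e90f1c8fc4611c9b7497c3b47f99ef6969cbc"),
        value: U256::ZERO,
        sender: address!("7885bcbd5cecef1336b5300fb5186a12ddd8c478"),
        input: bytes!("8ef1332e"),
    };

    // EIP-2718 encoding: the 0x7e type byte followed by the RLP payload.
    let mut encoded = Vec::with_capacity(tx.eip2718_encoded_length());
    tx.eip2718_encode(&mut encoded);
    assert_eq!(encoded[0], 0x7e);

    // The transaction hash is the keccak of that EIP-2718 encoding.
    assert_eq!(tx.tx_hash(), keccak256(&encoded));
}
```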
diff --git reth/crates/scroll/alloy/consensus/src/transaction/meta.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/meta.rs new file mode 100644 index 0000000000000000000000000000000000000000..d1ffc5f5acd663a9c042645dbb4d6e576edebfb2 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/meta.rs @@ -0,0 +1,28 @@ +use alloy_consensus::transaction::TransactionInfo; +use alloy_primitives::U256; + +/// Additional receipt metadata required for Scroll transactions. +/// +/// These fields are used to provide additional context in RPC responses. +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] +pub struct ScrollAdditionalInfo { + /// Only present in RPC responses. + pub l1_fee: U256, +} + +/// Additional fields in the context of a block that contains this transaction. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct ScrollTransactionInfo { + /// Additional transaction information. + pub inner: TransactionInfo, + /// Additional metadata for Scroll. + pub additional_info: ScrollAdditionalInfo, +} + +impl ScrollTransactionInfo { + /// Creates a new [`ScrollTransactionInfo`] with the given [`TransactionInfo`] and + /// [`ScrollAdditionalInfo`]. + pub const fn new(inner: TransactionInfo, additional_info: ScrollAdditionalInfo) -> Self { + Self { inner, additional_info } + } +}
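Since `ScrollTransactionInfo` is a plain wrapper, a one-liner shows the intended use. Hedged sketch, not part of the diff; the crate-root re-export path and the `l1_fee` value are assumptions:

```rust
// Hedged sketch: wraps alloy's TransactionInfo with the Scroll-specific L1 fee.
use alloy_consensus::transaction::TransactionInfo;
use alloy_primitives::U256;
use scroll_alloy_consensus::{ScrollAdditionalInfo, ScrollTransactionInfo};

fn main() {
    let inner = TransactionInfo::default(); // block hash/number, tx index, ...
    let additional_info = ScrollAdditionalInfo { l1_fee: U256::from(1_234u64) };

    let info = ScrollTransactionInfo::new(inner, additional_info);
    assert_eq!(info.additional_info.l1_fee, U256::from(1_234u64));
}
```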
diff --git reth/crates/scroll/alloy/consensus/src/transaction/mod.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..b2f08383ce63039af0eacf994da0805526ca9485 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/mod.rs @@ -0,0 +1,28 @@ +//! Transaction types for Scroll. + +mod tx_type; +pub use tx_type::{ScrollTxType, L1_MESSAGE_TX_TYPE_ID}; + +mod envelope; +pub use envelope::{ScrollTransaction, ScrollTxEnvelope}; + +mod l1_message; +pub use l1_message::{ScrollL1MessageTransactionFields, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE}; + +mod meta; +pub use meta::{ScrollAdditionalInfo, ScrollTransactionInfo}; + +mod typed; +pub use typed::ScrollTypedTransaction; + +mod pooled; +pub use pooled::ScrollPooledTransaction; + +#[cfg(feature = "serde")] +pub use l1_message::serde_l1_message_tx_rpc; + +/// Bincode-compatible serde implementations for transaction types. +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub(super) mod serde_bincode_compat { + pub use super::envelope::serde_bincode_compat::*; +}
diff --git reth/crates/scroll/alloy/consensus/src/transaction/pooled.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/pooled.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8579b86b133059968886002e38740bcd7655b95 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/pooled.rs @@ -0,0 +1,563 @@ +//! Defines the exact transaction variants that are allowed to be propagated over the eth p2p +//! protocol on Scroll. + +use crate::{ScrollTxEnvelope, ScrollTxType}; +use alloy_consensus::{ + error::ValueError, + transaction::{RlpEcdsaDecodableTx, TxEip1559, TxEip2930, TxHashRef, TxLegacy}, + SignableTransaction, Signed, Transaction, TxEip7702, TxEnvelope, Typed2718, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{bytes, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256}; +use alloy_rlp::{Decodable, Encodable, Header}; +use core::hash::{Hash, Hasher}; + +/// All possible transactions that can be included in a response to `GetPooledTransactions`. This +/// can include a typed signed transaction, but cannot include an L1 message transaction or an +/// EIP-4844 transaction. +/// +/// The difference between this and the [`ScrollTxEnvelope`] is that this type does not have the +/// L1 message variant, which is not expected to be pooled. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(all(any(test, feature = "arbitrary"), feature = "k256"), derive(arbitrary::Arbitrary))] +pub enum ScrollPooledTransaction { + /// An untagged [`TxLegacy`]. + Legacy(Signed<TxLegacy>), + /// A [`TxEip2930`] transaction tagged with type 1. + Eip2930(Signed<TxEip2930>), + /// A [`TxEip1559`] transaction tagged with type 2. + Eip1559(Signed<TxEip1559>), + /// A [`TxEip7702`] transaction tagged with type 4. + Eip7702(Signed<TxEip7702>), +} + +impl ScrollPooledTransaction { + /// Heavy operation that returns the signature hash over rlp encoded transaction. It is only + /// for signature signing or signer recovery. + pub fn signature_hash(&self) -> B256 { + match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + } + } + + /// Reference to transaction hash. Used to identify transaction. + pub fn hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + } + } + + /// Returns the signature of the transaction. + pub const fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + } + } + + /// The length of the 2718 encoded envelope in network format. This is the + /// length of the header + the length of the type flag and inner encoding. + fn network_len(&self) -> usize { + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); + } + + payload_length + } + + /// Recover the signer of the transaction. 
+ #[cfg(feature = "k256")] + pub fn recover_signer( + &self, + ) -> Result<alloy_primitives::Address, alloy_primitives::SignatureError> { + match self { + Self::Legacy(tx) => tx.recover_signer(), + Self::Eip2930(tx) => tx.recover_signer(), + Self::Eip1559(tx) => tx.recover_signer(), + Self::Eip7702(tx) => tx.recover_signer(), + } + } + + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(out), + Self::Eip2930(tx) => tx.tx().encode_for_signing(out), + Self::Eip1559(tx) => tx.tx().encode_for_signing(out), + Self::Eip7702(tx) => tx.tx().encode_for_signing(out), + } + } + + /// Converts the transaction into the ethereum [`TxEnvelope`]. + pub fn into_envelope(self) -> TxEnvelope { + match self { + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), + } + } + + /// Converts the transaction into the scroll [`ScrollTxEnvelope`]. + pub fn into_scroll_envelope(self) -> ScrollTxEnvelope { + match self { + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), + } + } + + /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. + pub const fn as_legacy(&self) -> Option<&TxLegacy> { + match self { + Self::Legacy(tx) => Some(tx.tx()), + _ => None, + } + } + + /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. + pub const fn as_eip2930(&self) -> Option<&TxEip2930> { + match self { + Self::Eip2930(tx) => Some(tx.tx()), + _ => None, + } + } + + /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. + pub const fn as_eip1559(&self) -> Option<&TxEip1559> { + match self { + Self::Eip1559(tx) => Some(tx.tx()), + _ => None, + } + } + + /// Returns the [`TxEip7702`] variant if the transaction is an EIP-1559 transaction. + pub const fn as_eip7702(&self) -> Option<&TxEip7702> { + match self { + Self::Eip7702(tx) => Some(tx.tx()), + _ => None, + } + } +} + +impl From<Signed<TxLegacy>> for ScrollPooledTransaction { + fn from(v: Signed<TxLegacy>) -> Self { + Self::Legacy(v) + } +} + +impl From<Signed<TxEip2930>> for ScrollPooledTransaction { + fn from(v: Signed<TxEip2930>) -> Self { + Self::Eip2930(v) + } +} + +impl From<Signed<TxEip1559>> for ScrollPooledTransaction { + fn from(v: Signed<TxEip1559>) -> Self { + Self::Eip1559(v) + } +} + +impl From<Signed<TxEip7702>> for ScrollPooledTransaction { + fn from(v: Signed<TxEip7702>) -> Self { + Self::Eip7702(v) + } +} + +impl Hash for ScrollPooledTransaction { + fn hash<H: Hasher>(&self, state: &mut H) { + self.trie_hash().hash(state); + } +} + +impl Encodable for ScrollPooledTransaction { + /// This encodes the transaction _with_ the signature, and an rlp header. + /// + /// For legacy transactions, it encodes the transaction data: + /// `rlp(tx-data)` + /// + /// For EIP-2718 typed transactions, it encodes the transaction type followed by the rlp of the + /// transaction: + /// `rlp(tx-type || rlp(tx-data))` + fn encode(&self, out: &mut dyn bytes::BufMut) { + self.network_encode(out); + } + + fn length(&self) -> usize { + self.network_len() + } +} + +impl Decodable for ScrollPooledTransaction { + /// Decodes an enveloped [`ScrollPooledTransaction`]. 
+ /// + /// CAUTION: this expects that `buf` is `rlp(tx_type || rlp(tx-data))` + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Ok(Self::network_decode(buf)?) + } +} + +impl Encodable2718 for ScrollPooledTransaction { + fn type_flag(&self) -> Option<u8> { + match self { + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(0x01), + Self::Eip1559(_) => Some(0x02), + Self::Eip7702(_) => Some(0x04), + } + } + + fn encode_2718_len(&self) -> usize { + match self { + Self::Legacy(tx) => tx.eip2718_encoded_length(), + Self::Eip2930(tx) => tx.eip2718_encoded_length(), + Self::Eip1559(tx) => tx.eip2718_encoded_length(), + Self::Eip7702(tx) => tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + match self { + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => tx.eip2718_encode(out), + Self::Eip1559(tx) => tx.eip2718_encode(out), + Self::Eip7702(tx) => tx.eip2718_encode(out), + } + } + + fn trie_hash(&self) -> B256 { + *self.hash() + } +} + +impl Decodable2718 for ScrollPooledTransaction { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { + match ty.try_into().map_err(|_| alloy_rlp::Error::Custom("unexpected tx type"))? { + ScrollTxType::Eip2930 => Ok(TxEip2930::rlp_decode_signed(buf)?.into()), + ScrollTxType::Eip1559 => Ok(TxEip1559::rlp_decode_signed(buf)?.into()), + ScrollTxType::Eip7702 => Ok(TxEip7702::rlp_decode_signed(buf)?.into()), + ScrollTxType::Legacy => Err(Eip2718Error::UnexpectedType(ScrollTxType::Legacy.into())), + ScrollTxType::L1Message => { + Err(Eip2718Error::UnexpectedType(ScrollTxType::L1Message.into())) + } + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> { + TxLegacy::rlp_decode_signed(buf).map(Into::into).map_err(Into::into) + } +} + +impl Transaction for ScrollPooledTransaction { + fn chain_id(&self) -> Option<ChainId> { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => 
tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + } + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + } + } + + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_create(), + Self::Eip2930(tx) => tx.tx().is_create(), + Self::Eip1559(tx) => tx.tx().is_create(), + Self::Eip7702(tx) => tx.tx().is_create(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.tx().access_list(), + Self::Eip2930(tx) => tx.tx().access_list(), + Self::Eip1559(tx) => tx.tx().access_list(), + Self::Eip7702(tx) => tx.tx().access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.tx().authorization_list(), + Self::Eip2930(tx) => tx.tx().authorization_list(), + Self::Eip1559(tx) => tx.tx().authorization_list(), + Self::Eip7702(tx) => tx.tx().authorization_list(), + } + } +} + +impl Typed2718 for ScrollPooledTransaction { + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + } + } +} + +impl From<ScrollPooledTransaction> for TxEnvelope { + fn from(tx: ScrollPooledTransaction) -> Self { + tx.into_envelope() + } +} + +impl From<ScrollPooledTransaction> for ScrollTxEnvelope { + fn from(tx: ScrollPooledTransaction) -> Self { + tx.into_scroll_envelope() + } +} + +impl TryFrom<ScrollTxEnvelope> for ScrollPooledTransaction { + type Error = ValueError<ScrollTxEnvelope>; + + fn try_from(value: ScrollTxEnvelope) -> Result<Self, Self::Error> { + value.try_into_pooled() + } +} + +#[cfg(feature = "k256")] +impl alloy_consensus::transaction::SignerRecoverable for 
ScrollPooledTransaction { + fn recover_signer( + &self, + ) -> Result<alloy_primitives::Address, alloy_consensus::crypto::RecoveryError> { + let signature_hash = self.signature_hash(); + alloy_consensus::crypto::secp256k1::recover_signer(self.signature(), signature_hash) + } + + fn recover_signer_unchecked( + &self, + ) -> Result<alloy_primitives::Address, alloy_consensus::crypto::RecoveryError> { + let signature_hash = self.signature_hash(); + alloy_consensus::crypto::secp256k1::recover_signer_unchecked( + self.signature(), + signature_hash, + ) + } +} + +impl TxHashRef for ScrollPooledTransaction { + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{address, hex}; + use bytes::Bytes; + + #[test] + fn invalid_legacy_pooled_decoding_input_too_short() { + let input_too_short = [ + // this should fail because the payload length is longer than expected + &hex!("d90b0280808bc5cd028083c5cdfd9e407c56565656")[..], + // these should fail decoding + // + // The `c1` at the beginning is a list header, and the rest is a valid legacy + // transaction, BUT the payload length of the list header is 1, and the payload is + // obviously longer than one byte. + &hex!("c10b02808083c5cd028883c5cdfd9e407c56565656"), + &hex!("c10b0280808bc5cd028083c5cdfd9e407c56565656"), + // this one is 19 bytes, and the buf is long enough, but the transaction will not + // consume that many bytes. + &hex!("d40b02808083c5cdeb8783c5acfd9e407c5656565656"), + &hex!("d30102808083c5cd02887dc5cdfd9e64fd9e407c56"), + ]; + + for hex_data in &input_too_short { + let input_rlp = &mut &hex_data[..]; + let res = ScrollPooledTransaction::decode(input_rlp); + + assert!( + res.is_err(), + "expected err after decoding rlp input: {:x?}", + Bytes::copy_from_slice(hex_data) + ); + + // this is a legacy tx so we can attempt the same test with decode_enveloped + let input_rlp = &mut &hex_data[..]; + let res = ScrollPooledTransaction::decode_2718(input_rlp); + + assert!( + res.is_err(), + "expected err after decoding enveloped rlp input: {:x?}", + Bytes::copy_from_slice(hex_data) + ); + } + } + + // <https://holesky.etherscan.io/tx/0x7f60faf8a410a80d95f7ffda301d5ab983545913d3d789615df3346579f6c849> + #[test] + fn decode_eip1559_enveloped() { + let data = 
hex!("02f903d382426882ba09832dc6c0848674742682ed9694714b6a4ea9b94a8a7d9fd362ed72630688c8898c80b90364492d24749189822d8512430d3f3ff7a2ede675ac08265c08e2c56ff6fdaa66dae1cdbe4a5d1d7809f3e99272d067364e597542ac0c369d69e22a6399c3e9bee5da4b07e3f3fdc34c32c3d88aa2268785f3e3f8086df0934b10ef92cfffc2e7f3d90f5e83302e31382e302d64657600000000000000000000000000000000000000000000569e75fc77c1a856f6daaf9e69d8a9566ca34aa47f9133711ce065a571af0cfd000000000000000000000000e1e210594771824dad216568b91c9cb4ceed361c00000000000000000000000000000000000000000000000000000000000546e00000000000000000000000000000000000000000000000000000000000e4e1c00000000000000000000000000000000000000000000000000000000065d6750c00000000000000000000000000000000000000000000000000000000000f288000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002cf600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000f1628e56fa6d8c50e5b984a58c0df14de31c7b857ce7ba499945b99252976a93d06dcda6776fc42167fbe71cb59f978f5ef5b12577a90b132d14d9c6efa528076f0161d7bf03643cfc5490ec5084f4a041db7f06c50bd97efa08907ba79ddcac8b890f24d12d8db31abbaaf18985d54f400449ee0559a4452afe53de5853ce090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000064ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000c080a01428023fc54a27544abc421d5d017b9a7c5936ad501cbdecd0d9d12d04c1a033a0753104bbf1c87634d6ff3f0ffa0982710612306003eb022363b57994bdef445a" +); + + let res = ScrollPooledTransaction::decode_2718(&mut &data[..]).unwrap(); + assert_eq!(res.to(), Some(address!("714b6a4ea9b94a8a7d9fd362ed72630688c8898c"))); + } + + #[test] + fn legacy_valid_pooled_decoding() { + // d3 <- payload length, d3 - c0 = 0x13 = 19 + // 0b <- nonce + // 02 <- gas_price + // 80 <- gas_limit + // 80 <- to (Create) + // 83 c5cdeb <- value + // 87 83c5acfd9e407c <- input + // 56 <- v (eip155, so modified with a chain id) + // 56 <- r + // 56 <- s + let data = &hex!("d30b02808083c5cdeb8783c5acfd9e407c565656")[..]; + + let input_rlp = &mut &data[..]; + let res = ScrollPooledTransaction::decode(input_rlp); + assert!(res.is_ok()); + assert!(input_rlp.is_empty()); + + // we can also decode_enveloped + let res = ScrollPooledTransaction::decode_2718(&mut &data[..]); + assert!(res.is_ok()); + } +}
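Tying the pooled type back to the envelope, here is a short sketch (not part of the diff) of the expected flow, reusing the legacy vector from `legacy_valid_pooled_decoding`; the crate-root re-exports are assumed:

```rust
// Hedged sketch: decode a pooled transaction and lift it into the Scroll envelope.
use alloy_primitives::hex;
use alloy_rlp::Decodable;
use scroll_alloy_consensus::{ScrollPooledTransaction, ScrollTxEnvelope};

fn main() {
    // Same minimal signed legacy transaction used in `legacy_valid_pooled_decoding`.
    let data = hex!("d30b02808083c5cdeb8783c5acfd9e407c565656");

    let pooled = ScrollPooledTransaction::decode(&mut &data[..]).expect("valid pooled tx");
    assert!(pooled.as_legacy().is_some());

    // Pooled -> envelope is infallible; the reverse direction goes through
    // `TryFrom`, because the envelope additionally carries L1 message transactions.
    let envelope: ScrollTxEnvelope = pooled.into();
    assert!(matches!(envelope, ScrollTxEnvelope::Legacy(_)));
}
```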
diff --git reth/crates/scroll/alloy/consensus/src/transaction/tx_type.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/tx_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..f980f3d6872db74ed351a991c6305812a5caea57 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/tx_type.rs @@ -0,0 +1,211 @@ +//! Contains the transaction type identifier for Scroll. + +use alloy_consensus::Typed2718; +use alloy_eips::eip2718::Eip2718Error; +use alloy_primitives::{U64, U8}; +use alloy_rlp::{BufMut, Decodable, Encodable}; +use derive_more::Display; +#[cfg(feature = "reth-codec")] +use reth_codecs::{ + __private::bytes, + txtype::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, + }, + Compact, +}; + +/// Identifier for an Scroll L1 message transaction +pub const L1_MESSAGE_TX_TYPE_ID: u8 = 126; // 0x7E + +/// Scroll `TransactionType` flags as specified in <https://docs.scroll.io/en/technology/chain/transactions/>. +#[repr(u8)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Display)] +pub enum ScrollTxType { + /// Legacy transaction type. + #[display("legacy")] + Legacy = 0, + /// EIP-2930 transaction type. + #[display("eip2930")] + Eip2930 = 1, + /// EIP-1559 transaction type. + #[display("eip1559")] + Eip1559 = 2, + /// EIP-7702 transaction type. + #[display("eip7702")] + Eip7702 = 4, + /// L1 message transaction type. + #[display("l1_message")] + L1Message = L1_MESSAGE_TX_TYPE_ID, +} + +impl ScrollTxType { + /// List of all variants. + pub const ALL: [Self; 5] = + [Self::Legacy, Self::Eip1559, Self::Eip2930, Self::Eip7702, Self::L1Message]; +} + +#[cfg(any(test, feature = "arbitrary"))] +impl arbitrary::Arbitrary<'_> for ScrollTxType { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { + let i = u.choose_index(Self::ALL.len())?; + Ok(Self::ALL[i]) + } +} + +impl From<ScrollTxType> for U8 { + fn from(tx_type: ScrollTxType) -> Self { + Self::from(u8::from(tx_type)) + } +} + +impl From<ScrollTxType> for u8 { + fn from(v: ScrollTxType) -> Self { + v as Self + } +} + +impl TryFrom<u8> for ScrollTxType { + type Error = Eip2718Error; + + fn try_from(value: u8) -> Result<Self, Self::Error> { + Ok(match value { + x if x == Self::Legacy as u8 => Self::Legacy, + x if x == Self::Eip2930 as u8 => Self::Eip2930, + x if x == Self::Eip1559 as u8 => Self::Eip1559, + x if x == Self::Eip7702 as u8 => Self::Eip7702, + x if x == Self::L1Message as u8 => Self::L1Message, + _ => return Err(Eip2718Error::UnexpectedType(value)), + }) + } +} + +impl TryFrom<u64> for ScrollTxType { + type Error = &'static str; + + fn try_from(value: u64) -> Result<Self, Self::Error> { + let err = || "invalid tx type"; + let value: u8 = value.try_into().map_err(|_| err())?; + Self::try_from(value).map_err(|_| err()) + } +} + +impl TryFrom<U64> for ScrollTxType { + type Error = &'static str; + + fn try_from(value: U64) -> Result<Self, Self::Error> { + value.to::<u64>().try_into() + } +} + +impl PartialEq<u8> for ScrollTxType { + fn eq(&self, other: &u8) -> bool { + (*self as u8) == *other + } +} + +impl PartialEq<ScrollTxType> for u8 { + fn eq(&self, other: &ScrollTxType) -> bool { + *self == *other as Self + } +} + +impl Encodable for ScrollTxType { + fn encode(&self, out: &mut dyn BufMut) { + (*self as u8).encode(out); + } + + fn length(&self) -> usize { + 1 + } +} + +impl Decodable for ScrollTxType { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + let 
ty = u8::decode(buf)?; + + Self::try_from(ty).map_err(|_| alloy_rlp::Error::Custom("invalid transaction type")) + } +} + +#[cfg(feature = "reth-codec")] +impl Compact for ScrollTxType { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: BufMut + AsMut<[u8]>, + { + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip7702 => { + buf.put_u8(alloy_eips::eip7702::constants::EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::L1Message => { + buf.put_u8(L1_MESSAGE_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + alloy_eips::eip7702::constants::EIP7702_TX_TYPE_ID => Self::Eip7702, + L1_MESSAGE_TX_TYPE_ID => Self::L1Message, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} + +impl Typed2718 for ScrollTxType { + fn ty(&self) -> u8 { + (*self).into() + } +} + +#[cfg(test)] +mod tests { + use super::*; + extern crate alloc; + use alloc::{vec, vec::Vec}; + + #[test] + fn test_all_tx_types() { + assert_eq!(ScrollTxType::ALL.len(), 5); + let all = vec![ + ScrollTxType::Legacy, + ScrollTxType::Eip1559, + ScrollTxType::Eip2930, + ScrollTxType::Eip7702, + ScrollTxType::L1Message, + ]; + assert_eq!(ScrollTxType::ALL.to_vec(), all); + } + + #[test] + fn tx_type_roundtrip() { + for &tx_type in &ScrollTxType::ALL { + let mut buf = Vec::new(); + tx_type.encode(&mut buf); + let decoded = ScrollTxType::decode(&mut &buf[..]).unwrap(); + assert_eq!(tx_type, decoded); + } + } +}
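A small sketch (not part of the diff) of the conversions this type-id enum adds, assuming `ScrollTxType` and `L1_MESSAGE_TX_TYPE_ID` are re-exported at the crate root, as the `transaction/mod.rs` re-exports suggest:

```rust
// Hedged sketch: u8 <-> ScrollTxType conversions and the Display derive.
use scroll_alloy_consensus::{ScrollTxType, L1_MESSAGE_TX_TYPE_ID};

fn main() {
    // 0x7e (126) is reserved for L1 message transactions.
    let ty = ScrollTxType::try_from(L1_MESSAGE_TX_TYPE_ID).expect("known type id");
    assert_eq!(ty, ScrollTxType::L1Message);
    assert_eq!(u8::from(ty), 0x7e);
    assert_eq!(ty.to_string(), "l1_message");

    // Unknown ids are rejected instead of being mapped to a default variant.
    assert!(ScrollTxType::try_from(0x03u8).is_err());
}
```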
diff --git reth/crates/scroll/alloy/consensus/src/transaction/typed.rs scroll-reth/crates/scroll/alloy/consensus/src/transaction/typed.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bd6a4cb76c8b8d0bd4c05655ef22cb21cb7aa61 --- /dev/null +++ scroll-reth/crates/scroll/alloy/consensus/src/transaction/typed.rs @@ -0,0 +1,597 @@ +use crate::{ScrollTxEnvelope, ScrollTxType, TxL1Message}; +use alloy_consensus::{ + transaction::RlpEcdsaEncodableTx, SignableTransaction, Signed, Transaction, TxEip1559, + TxEip2930, TxEip7702, TxLegacy, Typed2718, +}; +use alloy_eips::{eip2930::AccessList, Encodable2718}; +use alloy_primitives::{bytes::BufMut, Address, Bytes, ChainId, Signature, TxHash, TxKind, B256}; +#[cfg(feature = "reth-codec")] +use { + reth_codecs::{Compact, __private::bytes}, + reth_codecs_derive::generate_tests, +}; + +/// The `TypedTransaction` enum represents all Ethereum transaction request types, modified for +/// Scroll +/// +/// Its variants correspond to specific allowed transactions: +/// 1. `Legacy` (pre-EIP2718) [`TxLegacy`] +/// 2. `EIP2930` (state access lists) [`TxEip2930`] +/// 3. `EIP1559` [`TxEip1559`] +/// 4. `Eip7702` [`TxEip7702`] +/// 5. `L1Message` [`TxL1Message`] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "serde", + serde( + from = "serde_from::MaybeTaggedTypedTransaction", + into = "serde_from::TaggedTypedTransaction" + ) +)] +pub enum ScrollTypedTransaction { + /// Legacy transaction + Legacy(TxLegacy), + /// EIP-2930 transaction + Eip2930(TxEip2930), + /// EIP-1559 transaction + Eip1559(TxEip1559), + /// EIP-7702 transaction + Eip7702(TxEip7702), + /// Scroll L1 message transaction + L1Message(TxL1Message), +} + +impl From<TxLegacy> for ScrollTypedTransaction { + fn from(tx: TxLegacy) -> Self { + Self::Legacy(tx) + } +} + +impl From<TxEip2930> for ScrollTypedTransaction { + fn from(tx: TxEip2930) -> Self { + Self::Eip2930(tx) + } +} + +impl From<TxEip1559> for ScrollTypedTransaction { + fn from(tx: TxEip1559) -> Self { + Self::Eip1559(tx) + } +} + +impl From<TxEip7702> for ScrollTypedTransaction { + fn from(tx: TxEip7702) -> Self { + Self::Eip7702(tx) + } +} + +impl From<TxL1Message> for ScrollTypedTransaction { + fn from(tx: TxL1Message) -> Self { + Self::L1Message(tx) + } +} + +impl From<ScrollTxEnvelope> for ScrollTypedTransaction { + fn from(envelope: ScrollTxEnvelope) -> Self { + match envelope { + ScrollTxEnvelope::Legacy(tx) => Self::Legacy(tx.strip_signature()), + ScrollTxEnvelope::Eip2930(tx) => Self::Eip2930(tx.strip_signature()), + ScrollTxEnvelope::Eip1559(tx) => Self::Eip1559(tx.strip_signature()), + ScrollTxEnvelope::Eip7702(tx) => Self::Eip7702(tx.strip_signature()), + ScrollTxEnvelope::L1Message(tx) => Self::L1Message(tx.into_inner()), + } + } +} + +impl ScrollTypedTransaction { + /// Return the [`ScrollTxType`] of the inner txn. + pub const fn tx_type(&self) -> ScrollTxType { + match self { + Self::Legacy(_) => ScrollTxType::Legacy, + Self::Eip2930(_) => ScrollTxType::Eip2930, + Self::Eip1559(_) => ScrollTxType::Eip1559, + Self::Eip7702(_) => ScrollTxType::Eip7702, + Self::L1Message(_) => ScrollTxType::L1Message, + } + } + + /// Return the inner legacy transaction if it exists. + pub const fn legacy(&self) -> Option<&TxLegacy> { + match self { + Self::Legacy(tx) => Some(tx), + _ => None, + } + } + + /// Return the inner EIP-2930 transaction if it exists. 
+ pub const fn eip2930(&self) -> Option<&TxEip2930> { + match self { + Self::Eip2930(tx) => Some(tx), + _ => None, + } + } + + /// Return the inner EIP-1559 transaction if it exists. + pub const fn eip1559(&self) -> Option<&TxEip1559> { + match self { + Self::Eip1559(tx) => Some(tx), + _ => None, + } + } + + /// Return the inner EIP-1559 transaction if it exists. + pub const fn eip7702(&self) -> Option<&TxEip7702> { + match self { + Self::Eip7702(tx) => Some(tx), + _ => None, + } + } + + /// Return the inner l1 message if it exists. + pub const fn l1_message(&self) -> Option<&TxL1Message> { + match self { + Self::L1Message(tx) => Some(tx), + _ => None, + } + } + + /// Calculates the signing hash for the transaction. + pub fn signature_hash(&self) -> B256 { + match self { + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::L1Message(_) => B256::ZERO, + } + } +} + +impl Typed2718 for ScrollTypedTransaction { + fn ty(&self) -> u8 { + match self { + Self::Legacy(_) => ScrollTxType::Legacy as u8, + Self::Eip2930(_) => ScrollTxType::Eip2930 as u8, + Self::Eip1559(_) => ScrollTxType::Eip1559 as u8, + Self::Eip7702(_) => ScrollTxType::Eip7702 as u8, + Self::L1Message(_) => ScrollTxType::L1Message as u8, + } + } +} + +impl Transaction for ScrollTypedTransaction { + fn chain_id(&self) -> Option<alloy_primitives::ChainId> { + match self { + Self::Legacy(tx) => tx.chain_id(), + Self::Eip2930(tx) => tx.chain_id(), + Self::Eip1559(tx) => tx.chain_id(), + Self::Eip7702(tx) => tx.chain_id(), + Self::L1Message(tx) => tx.chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.nonce(), + Self::Eip2930(tx) => tx.nonce(), + Self::Eip1559(tx) => tx.nonce(), + Self::Eip7702(tx) => tx.nonce(), + Self::L1Message(tx) => tx.nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.gas_limit(), + Self::Eip2930(tx) => tx.gas_limit(), + Self::Eip1559(tx) => tx.gas_limit(), + Self::Eip7702(tx) => tx.gas_limit(), + Self::L1Message(tx) => tx.gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.gas_price(), + Self::Eip2930(tx) => tx.gas_price(), + Self::Eip1559(tx) => tx.gas_price(), + Self::Eip7702(tx) => tx.gas_price(), + Self::L1Message(tx) => tx.gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.max_fee_per_gas(), + Self::Eip2930(tx) => tx.max_fee_per_gas(), + Self::Eip1559(tx) => tx.max_fee_per_gas(), + Self::Eip7702(tx) => tx.max_fee_per_gas(), + Self::L1Message(tx) => tx.max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.max_priority_fee_per_gas(), + Self::L1Message(tx) => tx.max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.max_fee_per_blob_gas(), + Self::L1Message(tx) => tx.max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.priority_fee_or_price(), + Self::Eip2930(tx) => tx.priority_fee_or_price(), + 
Self::Eip1559(tx) => tx.priority_fee_or_price(), + Self::Eip7702(tx) => tx.priority_fee_or_price(), + Self::L1Message(tx) => tx.priority_fee_or_price(), + } + } + + fn to(&self) -> Option<Address> { + match self { + Self::Legacy(tx) => tx.to(), + Self::Eip2930(tx) => tx.to(), + Self::Eip1559(tx) => tx.to(), + Self::Eip7702(tx) => tx.to(), + Self::L1Message(tx) => tx.to(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + Self::L1Message(tx) => tx.kind(), + } + } + + fn value(&self) -> alloy_primitives::U256 { + match self { + Self::Legacy(tx) => tx.value(), + Self::Eip2930(tx) => tx.value(), + Self::Eip1559(tx) => tx.value(), + Self::Eip7702(tx) => tx.value(), + Self::L1Message(tx) => tx.value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.input(), + Self::Eip2930(tx) => tx.input(), + Self::Eip1559(tx) => tx.input(), + Self::Eip7702(tx) => tx.input(), + Self::L1Message(tx) => tx.input(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.access_list(), + Self::Eip2930(tx) => tx.access_list(), + Self::Eip1559(tx) => tx.access_list(), + Self::Eip7702(tx) => tx.access_list(), + Self::L1Message(tx) => tx.access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[alloy_primitives::B256]> { + match self { + Self::Legacy(tx) => tx.blob_versioned_hashes(), + Self::Eip2930(tx) => tx.blob_versioned_hashes(), + Self::Eip1559(tx) => tx.blob_versioned_hashes(), + Self::Eip7702(tx) => tx.blob_versioned_hashes(), + Self::L1Message(tx) => tx.blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[alloy_eips::eip7702::SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.authorization_list(), + Self::Eip2930(tx) => tx.authorization_list(), + Self::Eip1559(tx) => tx.authorization_list(), + Self::Eip7702(tx) => tx.authorization_list(), + Self::L1Message(tx) => tx.authorization_list(), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.is_dynamic_fee(), + Self::Eip2930(tx) => tx.is_dynamic_fee(), + Self::Eip1559(tx) => tx.is_dynamic_fee(), + Self::Eip7702(tx) => tx.is_dynamic_fee(), + Self::L1Message(tx) => tx.is_dynamic_fee(), + } + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + match self { + Self::Legacy(tx) => tx.effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.effective_gas_price(base_fee), + Self::L1Message(tx) => tx.effective_gas_price(base_fee), + } + } + + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.is_create(), + Self::Eip2930(tx) => tx.is_create(), + Self::Eip1559(tx) => tx.is_create(), + Self::Eip7702(tx) => tx.is_create(), + Self::L1Message(tx) => tx.is_create(), + } + } +} + +impl RlpEcdsaEncodableTx for ScrollTypedTransaction { + fn rlp_encoded_fields_length(&self) -> usize { + match self { + Self::Legacy(tx) => tx.rlp_encoded_fields_length(), + Self::Eip2930(tx) => tx.rlp_encoded_fields_length(), + Self::Eip1559(tx) => tx.rlp_encoded_fields_length(), + Self::Eip7702(tx) => tx.rlp_encoded_fields_length(), + Self::L1Message(tx) => tx.rlp_encoded_fields_length(), + } + } + + fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) { + match self { + Self::Legacy(tx) => tx.rlp_encode_fields(out), + Self::Eip2930(tx) => 
tx.rlp_encode_fields(out), + Self::Eip1559(tx) => tx.rlp_encode_fields(out), + Self::Eip7702(tx) => tx.rlp_encode_fields(out), + Self::L1Message(tx) => tx.rlp_encode_fields(out), + } + } + + fn eip2718_encode_with_type(&self, signature: &Signature, _ty: u8, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip2930(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip1559(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::Eip7702(tx) => tx.eip2718_encode_with_type(signature, tx.ty(), out), + Self::L1Message(tx) => tx.encode_2718(out), + } + } + + fn eip2718_encode(&self, signature: &Signature, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.eip2718_encode(signature, out), + Self::Eip2930(tx) => tx.eip2718_encode(signature, out), + Self::Eip1559(tx) => tx.eip2718_encode(signature, out), + Self::Eip7702(tx) => tx.eip2718_encode(signature, out), + Self::L1Message(tx) => tx.encode_2718(out), + } + } + + fn network_encode_with_type(&self, signature: &Signature, _ty: u8, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip2930(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip1559(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::Eip7702(tx) => tx.network_encode_with_type(signature, tx.ty(), out), + Self::L1Message(tx) => tx.network_encode(out), + } + } + + fn network_encode(&self, signature: &Signature, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.network_encode(signature, out), + Self::Eip2930(tx) => tx.network_encode(signature, out), + Self::Eip1559(tx) => tx.network_encode(signature, out), + Self::Eip7702(tx) => tx.network_encode(signature, out), + Self::L1Message(tx) => tx.network_encode(out), + } + } + + fn tx_hash_with_type(&self, signature: &Signature, _ty: u8) -> TxHash { + match self { + Self::Legacy(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip2930(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip1559(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::Eip7702(tx) => tx.tx_hash_with_type(signature, tx.ty()), + Self::L1Message(tx) => tx.tx_hash(), + } + } + + fn tx_hash(&self, signature: &Signature) -> TxHash { + match self { + Self::Legacy(tx) => tx.tx_hash(signature), + Self::Eip2930(tx) => tx.tx_hash(signature), + Self::Eip1559(tx) => tx.tx_hash(signature), + Self::Eip7702(tx) => tx.tx_hash(signature), + Self::L1Message(tx) => tx.tx_hash(), + } + } +} + +impl SignableTransaction<Signature> for ScrollTypedTransaction { + fn set_chain_id(&mut self, chain_id: ChainId) { + match self { + Self::Legacy(tx) => tx.set_chain_id(chain_id), + Self::Eip2930(tx) => tx.set_chain_id(chain_id), + Self::Eip1559(tx) => tx.set_chain_id(chain_id), + Self::Eip7702(tx) => tx.set_chain_id(chain_id), + Self::L1Message(_) => {} + } + } + + fn encode_for_signing(&self, out: &mut dyn BufMut) { + match self { + Self::Legacy(tx) => tx.encode_for_signing(out), + Self::Eip2930(tx) => tx.encode_for_signing(out), + Self::Eip1559(tx) => tx.encode_for_signing(out), + Self::Eip7702(tx) => tx.encode_for_signing(out), + Self::L1Message(_) => {} + } + } + + fn payload_len_for_signature(&self) -> usize { + match self { + Self::Legacy(tx) => tx.payload_len_for_signature(), + Self::Eip2930(tx) => tx.payload_len_for_signature(), + Self::Eip1559(tx) => tx.payload_len_for_signature(), + Self::Eip7702(tx) => tx.payload_len_for_signature(), + 
Self::L1Message(_) => 0, + } + } + + fn into_signed(self, signature: Signature) -> Signed<Self, Signature> + where + Self: Sized, + { + let hash = self.tx_hash(&signature); + Signed::new_unchecked(self, signature, hash) + } +} + +#[cfg(feature = "reth-codec")] +impl Compact for ScrollTypedTransaction { + fn to_compact<B>(&self, out: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let identifier = self.tx_type().to_compact(out); + match self { + Self::Legacy(tx) => tx.to_compact(out), + Self::Eip2930(tx) => tx.to_compact(out), + Self::Eip1559(tx) => tx.to_compact(out), + Self::Eip7702(tx) => tx.to_compact(out), + Self::L1Message(tx) => tx.to_compact(out), + }; + identifier + } + + fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) { + let (tx_type, buf) = ScrollTxType::from_compact(buf, identifier); + match tx_type { + ScrollTxType::Legacy => { + let (tx, buf) = Compact::from_compact(buf, buf.len()); + (Self::Legacy(tx), buf) + } + ScrollTxType::Eip2930 => { + let (tx, buf) = Compact::from_compact(buf, buf.len()); + (Self::Eip2930(tx), buf) + } + ScrollTxType::Eip1559 => { + let (tx, buf) = Compact::from_compact(buf, buf.len()); + (Self::Eip1559(tx), buf) + } + ScrollTxType::Eip7702 => { + let (tx, buf) = Compact::from_compact(buf, buf.len()); + (Self::Eip7702(tx), buf) + } + ScrollTxType::L1Message => { + let (tx, buf) = Compact::from_compact(buf, buf.len()); + (Self::L1Message(tx), buf) + } + } + } +} + +#[cfg(feature = "reth-codec")] +generate_tests!( + #[compact] + ScrollTypedTransaction, + ScrollTypedTransactionTests +); + +#[cfg(feature = "serde")] +mod serde_from { + //! NB: Why do we need this? + //! + //! Because the tag may be missing, we need an abstraction over tagged (with + //! type) and untagged (always legacy). This is + //! [`MaybeTaggedTypedTransaction`]. + //! + //! The tagged variant is [`TaggedTypedTransaction`], which always has a + //! type tag. + //! + //! We serialize via [`TaggedTypedTransaction`] and deserialize via + //! [`MaybeTaggedTypedTransaction`]. 
+ use super::*; + + #[derive(Debug, serde::Deserialize)] + #[serde(untagged)] + pub(crate) enum MaybeTaggedTypedTransaction { + Tagged(TaggedTypedTransaction), + Untagged(TxLegacy), + } + + #[derive(Debug, serde::Serialize, serde::Deserialize)] + #[serde(tag = "type")] + pub(crate) enum TaggedTypedTransaction { + /// `Legacy` transaction + #[serde(rename = "0x00", alias = "0x0")] + Legacy(TxLegacy), + /// `EIP-2930` transaction + #[serde(rename = "0x01", alias = "0x1")] + Eip2930(TxEip2930), + /// `EIP-1559` transaction + #[serde(rename = "0x02", alias = "0x2")] + Eip1559(TxEip1559), + /// `EIP-7702` transaction + #[serde(rename = "0x04", alias = "0x4")] + Eip7702(TxEip7702), + /// `L1Message` transaction + #[serde( + rename = "0x7e", + alias = "0x7E", + serialize_with = "crate::serde_l1_message_tx_rpc" + )] + L1Message(TxL1Message), + } + + impl From<MaybeTaggedTypedTransaction> for ScrollTypedTransaction { + fn from(value: MaybeTaggedTypedTransaction) -> Self { + match value { + MaybeTaggedTypedTransaction::Tagged(tagged) => tagged.into(), + MaybeTaggedTypedTransaction::Untagged(tx) => Self::Legacy(tx), + } + } + } + + impl From<TaggedTypedTransaction> for ScrollTypedTransaction { + fn from(value: TaggedTypedTransaction) -> Self { + match value { + TaggedTypedTransaction::Legacy(signed) => Self::Legacy(signed), + TaggedTypedTransaction::Eip2930(signed) => Self::Eip2930(signed), + TaggedTypedTransaction::Eip1559(signed) => Self::Eip1559(signed), + TaggedTypedTransaction::Eip7702(signed) => Self::Eip7702(signed), + TaggedTypedTransaction::L1Message(tx) => Self::L1Message(tx), + } + } + } + + impl From<ScrollTypedTransaction> for TaggedTypedTransaction { + fn from(value: ScrollTypedTransaction) -> Self { + match value { + ScrollTypedTransaction::Legacy(signed) => Self::Legacy(signed), + ScrollTypedTransaction::Eip2930(signed) => Self::Eip2930(signed), + ScrollTypedTransaction::Eip1559(signed) => Self::Eip1559(signed), + ScrollTypedTransaction::Eip7702(signed) => Self::Eip7702(signed), + ScrollTypedTransaction::L1Message(tx) => Self::L1Message(tx), + } + } + } +}
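For reference, here is a minimal sketch of the tagged/untagged behaviour described in the `serde_from` module docs above. It assumes the crate's `serde` feature is enabled and uses `serde_json`; the assertions mirror the documented behaviour (serialize via the tagged form, fall back to legacy when the `type` tag is absent) rather than any additional API guarantee.

use alloy_consensus::TxLegacy;
use scroll_alloy_consensus::ScrollTypedTransaction;

fn main() -> serde_json::Result<()> {
    // Serialization goes through the tagged representation, so the JSON
    // object carries a "type" field ("0x00" for a legacy transaction).
    let tx = ScrollTypedTransaction::Legacy(TxLegacy::default());
    let json = serde_json::to_value(&tx)?;
    assert_eq!(json.get("type").and_then(|t| t.as_str()), Some("0x00"));

    // Deserialization accepts the tagged form back...
    let tagged: ScrollTypedTransaction = serde_json::from_value(json.clone())?;
    assert!(matches!(tagged, ScrollTypedTransaction::Legacy(_)));

    // ...and also an untagged body, which is always interpreted as legacy.
    let mut untagged = json;
    if let Some(obj) = untagged.as_object_mut() {
        obj.remove("type");
    }
    let legacy: ScrollTypedTransaction = serde_json::from_value(untagged)?;
    assert!(matches!(legacy, ScrollTypedTransaction::Legacy(_)));
    Ok(())
}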
diff --git reth/crates/scroll/alloy/evm/Cargo.toml scroll-reth/crates/scroll/alloy/evm/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2c61be7214848321493c951beb1b58b134643826 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/Cargo.toml @@ -0,0 +1,72 @@ +[package] +name = "scroll-alloy-evm" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-consensus = { workspace = true, default-features = false } +alloy-eips = { workspace = true, default-features = false } +alloy-evm = { workspace = true, default-features = false } +alloy-primitives = { workspace = true, default-features = false } + +# revm +revm = { workspace = true, default-features = false, features = ["optional_no_base_fee"] } + +# scroll +revm-scroll = { workspace = true, default-features = false } + +# scroll +scroll-alloy-consensus = { workspace = true, default-features = false } +scroll-alloy-hardforks = { workspace = true, default-features = false } + +# misc +auto_impl = { workspace = true, default-features = false } +serde = { workspace = true, default-features = false, features = ["derive"], optional = true } +encoder-standard = { workspace = true, default-features = false, optional = true } + +[dev-dependencies] +alloy-hardforks.workspace = true +alloy-primitives = { workspace = true, features = ["getrandom"] } +eyre.workspace = true +reth-evm.workspace = true +reth-scroll-chainspec.workspace = true +reth-scroll-evm.workspace = true + +[features] +default = ["std"] +std = [ + "alloy-evm/std", + "alloy-primitives/std", + "revm-scroll/std", + "revm/std", + "serde/std", + "alloy-consensus/std", + "alloy-eips/std", + "scroll-alloy-consensus/std", + "scroll-alloy-hardforks/std", + "reth-evm/std", + "reth-scroll-chainspec/std", + "reth-scroll-evm/std", + "zstd_compression", +] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "revm-scroll/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-consensus/serde", + "scroll-alloy-consensus/serde", + "scroll-alloy-hardforks/serde", + "alloy-hardforks/serde", +] +zstd_compression = ["encoder-standard"]
diff --git reth/crates/scroll/alloy/evm/src/block/curie.rs scroll-reth/crates/scroll/alloy/evm/src/block/curie.rs new file mode 100644 index 0000000000000000000000000000000000000000..4678d42c40939c50194aaf3311582b3d2265f2a0 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/block/curie.rs @@ -0,0 +1,170 @@ +//! Curie fork transition for Scroll. +//! +//! On block 7096836, Scroll performed a transition to the Curie fork state, which brought various +//! changes to the protocol: +//! 1. Fee cost reduction thanks to the use of compressed blobs on the L1. +//! 2. Modified [EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md) pricing +//! model along with support for EIP-1559 and EIP-2930 transactions. +//! 3. Support for `TLOAD`, `TSTORE` and `MCOPY` ([EIP-1153](https://eips.ethereum.org/EIPS/eip-1153) +//! and [EIP-5656](https://eips.ethereum.org/EIPS/eip-5656)). +//! 4. Dynamic block time. +//! +//! Compressed blobs allowed for more transactions to be stored in each blob, reducing the DA cost +//! per transaction. Accordingly, the L1 gas oracle contract's bytecode was updated in order to +//! reflect the update in the DA costs: +//! - original formula: `(l1GasUsed(txRlp) + overhead) * l1BaseFee * scalar`. +//! - updated formula: `l1BaseFee * commitScalar + len(txRlp) * l1BlobBaseFee * blobScalar`. +//! +//! More details on the Curie update: <https://scroll.io/blog/compressing-the-gas-scrolls-curie-upgrade> + +use alloc::vec; +use revm::{ + bytecode::Bytecode, + database::{states::StorageSlot, State}, + primitives::{bytes, Bytes, U256}, + state::AccountInfo, + Database, +}; + +// Import L1GasPriceOracle address and slots. +use crate::gas_price_oracle::*; + +/// Bytecode of L1 gas price oracle at Curie transition. +pub const CURIE_L1_GAS_PRICE_ORACLE_BYTECODE: Bytes = 
bytes!("608060405234801561000f575f80fd5b5060043610610132575f3560e01c8063715018a6116100b4578063a911d77f11610079578063a911d77f1461024c578063bede39b514610254578063de26c4a114610267578063e88a60ad1461027a578063f2fde38b1461028d578063f45e65d8146102a0575f80fd5b8063715018a6146101eb57806384189161146101f35780638da5cb5b146101fc57806393e59dc114610226578063944b247f14610239575f80fd5b80633d0f963e116100fa5780633d0f963e146101a057806349948e0e146101b3578063519b4bd3146101c65780636a5e67e5146101cf57806370465597146101d8575f80fd5b80630c18c1621461013657806313dad5be1461015257806323e524ac1461016f5780633577afc51461017857806339455d3a1461018d575b5f80fd5b61013f60025481565b6040519081526020015b60405180910390f35b60085461015f9060ff1681565b6040519015158152602001610149565b61013f60065481565b61018b6101863660046109b3565b6102a9565b005b61018b61019b3660046109ca565b61033b565b61018b6101ae3660046109ea565b610438565b61013f6101c1366004610a2b565b6104bb565b61013f60015481565b61013f60075481565b61018b6101e63660046109b3565b6104e0565b61018b61056e565b61013f60055481565b5f5461020e906001600160a01b031681565b6040516001600160a01b039091168152602001610149565b60045461020e906001600160a01b031681565b61018b6102473660046109b3565b6105a2565b61018b61062e565b61018b6102623660046109b3565b61068a565b61013f610275366004610a2b565b610747565b61018b6102883660046109b3565b610764565b61018b61029b3660046109ea565b6107f0565b61013f60035481565b5f546001600160a01b031633146102db5760405162461bcd60e51b81526004016102d290610ad6565b60405180910390fd5b621c9c388111156102ff57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610382573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103a69190610b0d565b6103c3576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a16040518181527f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b031633146104615760405162461bcd60e51b81526004016102d290610ad6565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f7910161042c565b6008545f9060ff16156104d7576104d18261087b565b92915050565b6104d1826108c1565b5f546001600160a01b031633146105095760405162461bcd60e51b81526004016102d290610ad6565b610519633b9aca006103e8610b40565b81111561053957604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a90602001610330565b5f546001600160a01b031633146105975760405162461bcd60e51b81526004016102d290610ad6565b6105a05f610904565b565b5f546001600160a01b031633146105cb5760405162461bcd60e51b81526004016102d290610ad6565b6105d9633b9aca0080610b40565b8111156105f95760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a10890602001610330565b5f546001600160a01b031633146106575760405162461bcd60e51b81526004016102d290610ad6565b60085460ff161561067b576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa1580156106d1573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610
6f59190610b0d565b610712576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c4490602001610330565b6008545f9060ff161561075b57505f919050565b6104d182610953565b5f546001600160a01b0316331461078d5760405162461bcd60e51b81526004016102d290610ad6565b61079b633b9aca0080610b40565b8111156107bb5760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa90602001610330565b5f546001600160a01b031633146108195760405162461bcd60e51b81526004016102d290610ad6565b6001600160a01b03811661086f5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016102d2565b61087881610904565b50565b5f633b9aca0060055483516007546108939190610b40565b61089d9190610b40565b6001546006546108ad9190610b40565b6108b79190610b57565b6104d19190610b6a565b5f806108cc83610953565b90505f600154826108dd9190610b40565b9050633b9aca00600354826108f29190610b40565b6108fc9190610b6a565b949350505050565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b818110156109a45784818151811061097557610975610b89565b01602001516001600160f81b0319165f036109955760048301925061099c565b6010830192505b60010161095b565b50506002540160400192915050565b5f602082840312156109c3575f80fd5b5035919050565b5f80604083850312156109db575f80fd5b50508035926020909101359150565b5f602082840312156109fa575f80fd5b81356001600160a01b0381168114610a10575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610a3b575f80fd5b813567ffffffffffffffff80821115610a52575f80fd5b818401915084601f830112610a65575f80fd5b813581811115610a7757610a77610a17565b604051601f8201601f19908116603f01168101908382118183101715610a9f57610a9f610a17565b81604052828152876020848701011115610ab7575f80fd5b826020860160208301375f928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610b1d575f80fd5b81518015158114610a10575f80fd5b634e487b7160e01b5f52601160045260245ffd5b80820281158282048414176104d1576104d1610b2c565b808201808211156104d1576104d1610b2c565b5f82610b8457634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea26469706673582212200c2ac583f18be4f94ab169ae6f2ea3a708a7c0d4424746b120b177adb39e626064736f6c63430008180033"); + +/// Storage update of L1 gas price oracle at Curie transition. +pub const CURIE_L1_GAS_PRICE_ORACLE_STORAGE: [(U256, U256); 4] = [ + (GPO_L1_BLOB_BASE_FEE_SLOT, INITIAL_L1_BLOB_BASE_FEE), + (GPO_COMMIT_SCALAR_SLOT, INITIAL_COMMIT_SCALAR), + (GPO_BLOB_SCALAR_SLOT, INITIAL_BLOB_SCALAR), + (GPO_IS_CURIE_SLOT, IS_CURIE), +]; + +/// The initial blob base fee used by the oracle contract. +pub const INITIAL_L1_BLOB_BASE_FEE: U256 = U256::from_limbs([1, 0, 0, 0]); +/// The initial commit scalar used by the oracle contract. +pub const INITIAL_COMMIT_SCALAR: U256 = U256::from_limbs([230759955285, 0, 0, 0]); +/// The initial blob scalar used by the oracle contract. +pub const INITIAL_BLOB_SCALAR: U256 = U256::from_limbs([417565260, 0, 0, 0]); +/// Curie slot is set to 1 (true) after the Curie block fork. +pub const IS_CURIE: U256 = U256::from_limbs([1, 0, 0, 0]); + +/// Applies the Scroll Curie hard fork to the state: +/// - Updates the L1 oracle contract bytecode to reflect the DA cost reduction. 
+/// - Sets the initial blob base fee, commit and blob scalar and sets the `isCurie` slot to 1 +/// (true). +pub(super) fn apply_curie_hard_fork<DB: Database>(state: &mut State<DB>) -> Result<(), DB::Error> { + let oracle = state.load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS)?; + + // compute the code hash + let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); + let code_hash = bytecode.hash_slow(); + + // get the old oracle account info + let old_oracle_info = oracle.account_info().unwrap_or_default(); + + // init new oracle account information + let new_oracle_info = AccountInfo { code_hash, code: Some(bytecode), ..old_oracle_info }; + + // init new storage + let new_storage = CURIE_L1_GAS_PRICE_ORACLE_STORAGE + .into_iter() + .map(|(slot, present_value)| { + ( + slot, + StorageSlot { + present_value, + previous_or_original_value: oracle.storage_slot(slot).unwrap_or_default(), + }, + ) + }) + .collect(); + + // create transition for oracle new account info and storage + let transition = oracle.change(new_oracle_info, new_storage); + + // add transition + if let Some(s) = state.transition_state.as_mut() { + s.add_transitions(vec![(L1_GAS_PRICE_ORACLE_ADDRESS, transition)]) + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use revm::{ + database::{ + states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, + EmptyDB, State, + }, + primitives::{bytes, keccak256, U256}, + state::{AccountInfo, Bytecode}, + Database, + }; + use std::str::FromStr; + + #[test] + fn test_apply_curie_fork() -> eyre::Result<()> { + // init state + let db = EmptyDB::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // oracle pre fork state + let bytecode_pre_fork = Bytecode::new_raw( 
bytes!("608060405234801561001057600080fd5b50600436106100cf5760003560e01c8063715018a61161008c578063bede39b511610066578063bede39b51461018d578063de26c4a1146101a0578063f2fde38b146101b3578063f45e65d8146101c657600080fd5b8063715018a6146101475780638da5cb5b1461014f57806393e59dc11461017a57600080fd5b80630c18c162146100d45780633577afc5146100f05780633d0f963e1461010557806349948e0e14610118578063519b4bd31461012b5780637046559714610134575b600080fd5b6100dd60025481565b6040519081526020015b60405180910390f35b6101036100fe366004610671565b6101cf565b005b61010361011336600461068a565b610291565b6100dd6101263660046106d0565b61031c565b6100dd60015481565b610103610142366004610671565b610361565b610103610416565b600054610162906001600160a01b031681565b6040516001600160a01b0390911681526020016100e7565b600454610162906001600160a01b031681565b61010361019b366004610671565b61044c565b6100dd6101ae3660046106d0565b610533565b6101036101c136600461068a565b610595565b6100dd60035481565b6000546001600160a01b031633146102025760405162461bcd60e51b81526004016101f990610781565b60405180910390fd5b621c9c388111156102555760405162461bcd60e51b815260206004820152601760248201527f657863656564206d6178696d756d206f7665726865616400000000000000000060448201526064016101f9565b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6000546001600160a01b031633146102bb5760405162461bcd60e51b81526004016101f990610781565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f7910160405180910390a15050565b60008061032883610533565b905060006001548261033a91906107b8565b9050633b9aca006003548261034f91906107b8565b61035991906107e5565b949350505050565b6000546001600160a01b0316331461038b5760405162461bcd60e51b81526004016101f990610781565b61039b633b9aca006103e86107b8565b8111156103e15760405162461bcd60e51b8152602060048201526014602482015273657863656564206d6178696d756d207363616c6560601b60448201526064016101f9565b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a90602001610286565b6000546001600160a01b031633146104405760405162461bcd60e51b81526004016101f990610781565b61044a6000610621565b565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610495573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104b99190610807565b6104fe5760405162461bcd60e51b81526020600482015260166024820152752737ba103bb434ba32b634b9ba32b21039b2b73232b960511b60448201526064016101f9565b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c4490602001610286565b80516000908190815b818110156105865784818151811061055657610556610829565b01602001516001600160f81b0319166000036105775760048301925061057e565b6010830192505b60010161053c565b50506002540160400192915050565b6000546001600160a01b031633146105bf5760405162461bcd60e51b81526004016101f990610781565b6001600160a01b0381166106155760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016101f9565b61061e81610621565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561068357600080fd5b5035919050565b60006020828403121561069c57600080fd5b81356001600160a01b03811681146106b357600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000602082840312156106e257600080fd5b813567fffffffffffffff
f808211156106fa57600080fd5b818401915084601f83011261070e57600080fd5b813581811115610720576107206106ba565b604051601f8201601f19908116603f01168101908382118183101715610748576107486106ba565b8160405282815287602084870101111561076157600080fd5b826020860160208301376000928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b60008160001904831182151516156107e057634e487b7160e01b600052601160045260246000fd5b500290565b60008261080257634e487b7160e01b600052601260045260246000fd5b500490565b60006020828403121561081957600080fd5b815180151581146106b357600080fd5b634e487b7160e01b600052603260045260246000fdfea26469706673582212205ea335809638809cf032c794fd966e2439020737b1dcc2218435cb438286efcf64736f6c63430008100033")); + let oracle_pre_fork = AccountInfo { + code_hash: bytecode_pre_fork.hash_slow(), + code: Some(bytecode_pre_fork), + ..Default::default() + }; + let oracle_storage_pre_fork = PlainStorage::from_iter([ + (GPO_OWNER_SLOT, U256::from_str("0x13d24a7ff6f5ec5ff0e9c40fc3b8c9c01c65437b")?), + (GPO_L1_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_OVERHEAD_SLOT, U256::from(0x38)), + (GPO_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_WHITELIST_SLOT, U256::from_str("0x5300000000000000000000000000000000000003")?), + ]); + state.insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + oracle_pre_fork.clone(), + oracle_storage_pre_fork.clone(), + ); + + // apply curie fork + apply_curie_hard_fork(&mut state)?; + + // merge transitions + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // check oracle account info + let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS).unwrap().clone(); + let code_hash = keccak256(&CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); + let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); + let expected_oracle_info = + AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; + + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); + + // check oracle storage changeset + let mut storage = oracle.storage.into_iter().collect::<Vec<(U256, StorageSlot)>>(); + storage.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (got, expected) in storage.into_iter().zip(CURIE_L1_GAS_PRICE_ORACLE_STORAGE) { + assert_eq!(got.0, expected.0); + assert_eq!(got.1, StorageSlot { present_value: expected.1, ..Default::default() }); + } + + // check oracle original storage + for (slot, value) in oracle_storage_pre_fork { + assert_eq!(state.storage(L1_GAS_PRICE_ORACLE_ADDRESS, slot)?, value) + } + + // check deployed contract + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + + Ok(()) + } +}
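For intuition on the formula change quoted in the curie.rs module docs above, a rough sketch in plain integer arithmetic. The helper names are hypothetical and the scaling is simplified: the deployed oracle works in 1e9 fixed-point precision, and the pre-Curie `l1GasUsed` term is assumed here to use standard calldata metering (4 gas per zero byte, 16 per non-zero byte). Example inputs reuse the slot values from the constants and test fixture in this file.

/// Pre-Curie DA cost per the docs: `(l1GasUsed(txRlp) + overhead) * l1BaseFee * scalar`.
fn pre_curie_da_cost(tx_rlp: &[u8], overhead: u128, l1_base_fee: u128, scalar: u128) -> u128 {
    // Assumed calldata metering: 4 gas per zero byte, 16 per non-zero byte.
    let l1_gas_used: u128 = tx_rlp.iter().map(|b| if *b == 0 { 4 } else { 16 }).sum();
    (l1_gas_used + overhead) * l1_base_fee * scalar
}

/// Post-Curie DA cost per the docs:
/// `l1BaseFee * commitScalar + len(txRlp) * l1BlobBaseFee * blobScalar`.
fn post_curie_da_cost(
    tx_rlp: &[u8],
    l1_base_fee: u128,
    commit_scalar: u128,
    l1_blob_base_fee: u128,
    blob_scalar: u128,
) -> u128 {
    l1_base_fee * commit_scalar + tx_rlp.len() as u128 * l1_blob_base_fee * blob_scalar
}

fn main() {
    // Placeholder payload; printed values are illustrative, not consensus results.
    let tx_rlp = vec![0u8; 128];
    let l1_base_fee = 0x15f50e5e_u128; // GPO_L1_BASE_FEE_SLOT value from the test fixture
    println!("pre-Curie:  {}", pre_curie_da_cost(&tx_rlp, 0x38, l1_base_fee, 0x3e95ba80));
    println!(
        "post-Curie: {}",
        post_curie_da_cost(&tx_rlp, l1_base_fee, 230_759_955_285, 1, 417_565_260)
    );
}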
diff --git reth/crates/scroll/alloy/evm/src/block/feynman.rs scroll-reth/crates/scroll/alloy/evm/src/block/feynman.rs new file mode 100644 index 0000000000000000000000000000000000000000..555338e3cb6e385bc4ca219be5e477ac3521a969 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/block/feynman.rs @@ -0,0 +1,221 @@ +//! Feynman fork transition for Scroll. + +use alloc::vec; +use revm::{ + bytecode::Bytecode, + database::{states::StorageSlot, State}, + primitives::{bytes, Bytes, U256}, + state::AccountInfo, + Database, +}; + +// Import L1GasPriceOracle address and slots. +use crate::gas_price_oracle::*; + +/// Bytecode of L1 gas price oracle at Feynman transition. +pub const FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE: Bytes = bytes!("608060405234801561000f575f80fd5b50600436106101a1575f3560e01c806384189161116100f3578063c63b9e2d11610093578063e88a60ad1161006e578063e88a60ad1461032e578063f2fde38b14610341578063f45e65d814610354578063fe5b04151461035d575f80fd5b8063c63b9e2d146102ff578063c91e514914610312578063de26c4a11461031b575f80fd5b8063944b247f116100ce578063944b247f146102be578063a911d77f146102d1578063aa5e9334146102d9578063bede39b5146102ec575f80fd5b806384189161146102785780638da5cb5b1461028157806393e59dc1146102ab575f80fd5b80633d0f963e1161015e5780636112d6db116101395780636112d6db1461024b5780636a5e67e514610254578063704655971461025d578063715018a614610270575f80fd5b80633d0f963e1461021c57806349948e0e1461022f578063519b4bd314610242575f80fd5b80630c18c162146101a557806313dad5be146101c157806323e524ac146101de5780633577afc5146101e757806339455d3a146101fc5780633b7656bb1461020f575b5f80fd5b6101ae60025481565b6040519081526020015b60405180910390f35b6008546101ce9060ff1681565b60405190151581526020016101b8565b6101ae60065481565b6101fa6101f5366004610c73565b610365565b005b6101fa61020a366004610c8a565b6103f7565b600b546101ce9060ff1681565b6101fa61022a366004610caa565b6104f4565b6101ae61023d366004610ceb565b610577565b6101ae60015481565b6101ae600a5481565b6101ae60075481565b6101fa61026b366004610c73565b6105b0565b6101fa61063e565b6101ae60055481565b5f54610293906001600160a01b031681565b6040516001600160a01b0390911681526020016101b8565b600454610293906001600160a01b031681565b6101fa6102cc366004610c73565b610672565b6101fa6106fe565b6101fa6102e7366004610c73565b61075a565b6101fa6102fa366004610c73565b6107f4565b6101fa61030d366004610c73565b6108b1565b6101ae60095481565b6101ae610329366004610ceb565b61094a565b6101fa61033c366004610c73565b610974565b6101fa61034f366004610caa565b610a00565b6101ae60035481565b6101fa610a8b565b5f546001600160a01b031633146103975760405162461bcd60e51b815260040161038e90610d96565b60405180910390fd5b621c9c388111156103bb57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561043e573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104629190610dcd565b61047f576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a16040518181527f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b0316331461051d5760405162461bcd60e51b815260040161038e90610d96565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f791016104e8565b600b545f9060ff
16156105935761058d82610ae7565b92915050565b60085460ff16156105a75761058d82610b45565b61058d82610b81565b5f546001600160a01b031633146105d95760405162461bcd60e51b815260040161038e90610d96565b6105e9633b9aca006103e8610e00565b81111561060957604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a906020016103ec565b5f546001600160a01b031633146106675760405162461bcd60e51b815260040161038e90610d96565b6106705f610bc4565b565b5f546001600160a01b0316331461069b5760405162461bcd60e51b815260040161038e90610d96565b6106a9633b9aca0080610e00565b8111156106c95760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a108906020016103ec565b5f546001600160a01b031633146107275760405162461bcd60e51b815260040161038e90610d96565b60085460ff161561074b576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b5f546001600160a01b031633146107835760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806107a1575061079e633b9aca0080610e00565b81115b156107bf5760405163d9b5dcdf60e01b815260040160405180910390fd5b60098190556040518181527fd50d3079c77df569cd58d55d4e5614bfe7066449009425d22bde8e75242f50bb906020016103ec565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561083b573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061085f9190610dcd565b61087c576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c44906020016103ec565b5f546001600160a01b031633146108da5760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806108f857506108f5633b9aca0080610e00565b81115b156109155760405162ae184360e01b815260040160405180910390fd5b600a8190556040518181527f8647cebb7e57360673a28415c0bed2f68c42a86c5035f1c9b2eda2b09509288a906020016103ec565b600b545f9060ff168061095f575060085460ff165b1561096b57505f919050565b61058d82610c13565b5f546001600160a01b0316331461099d5760405162461bcd60e51b815260040161038e90610d96565b6109ab633b9aca0080610e00565b8111156109cb5760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa906020016103ec565b5f546001600160a01b03163314610a295760405162461bcd60e51b815260040161038e90610d96565b6001600160a01b038116610a7f5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161038e565b610a8881610bc4565b50565b5f546001600160a01b03163314610ab45760405162461bcd60e51b815260040161038e90610d96565b600b5460ff1615610ad857604051631a7c228b60e21b815260040160405180910390fd5b600b805460ff19166001179055565b5f633b9aca0080600a548451600554600754610b039190610e00565b600154600654610b139190610e00565b610b1d9190610e17565b610b279190610e00565b610b319190610e00565b610b3b9190610e2a565b61058d9190610e2a565b5f633b9aca006005548351600754610b5d9190610e00565b610b679190610e00565b600154600654610b779190610e00565b610b3b9190610e17565b5f80610b8c83610c13565b90505f60015482610b9d9190610e00565b9050633b9aca0060035482610bb29190610e00565b610bbc9190610e2a565b949350505050565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b81811015610c6457848181518110610c3557610c35610e49565b01602001516001600160f81b0319165f03610c5557600483019250610c5c565b6010830192505b600101610c1b565b50506002540160400192915
050565b5f60208284031215610c83575f80fd5b5035919050565b5f8060408385031215610c9b575f80fd5b50508035926020909101359150565b5f60208284031215610cba575f80fd5b81356001600160a01b0381168114610cd0575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610cfb575f80fd5b813567ffffffffffffffff80821115610d12575f80fd5b818401915084601f830112610d25575f80fd5b813581811115610d3757610d37610cd7565b604051601f8201601f19908116603f01168101908382118183101715610d5f57610d5f610cd7565b81604052828152876020848701011115610d77575f80fd5b826020860160208301375f928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610ddd575f80fd5b81518015158114610cd0575f80fd5b634e487b7160e01b5f52601160045260245ffd5b808202811582820484141761058d5761058d610dec565b8082018082111561058d5761058d610dec565b5f82610e4457634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a"); + +/// The initial compression penalty threshold used by the oracle contract. +const INITIAL_PENALTY_THRESHOLD: U256 = U256::from_limbs([1_000_000_000, 0, 0, 0]); +/// The initial compression penalty factor used by the oracle contract. +const INITIAL_PENALTY_FACTOR: U256 = U256::from_limbs([1_000_000_000, 0, 0, 0]); +/// Feynman slot is set to 1 (true) after the Feynman block fork. +const IS_FEYNMAN: U256 = U256::from_limbs([1, 0, 0, 0]); + +/// Storage update of L1 gas price oracle at Feynman transition. +const FEYNMAN_L1_GAS_PRICE_ORACLE_STORAGE: [(U256, U256); 3] = [ + (GPO_PENALTY_THRESHOLD_SLOT, INITIAL_PENALTY_THRESHOLD), + (GPO_PENALTY_FACTOR_SLOT, INITIAL_PENALTY_FACTOR), + (GPO_IS_FEYNMAN_SLOT, IS_FEYNMAN), +]; + +/// Applies the Scroll Feynman hard fork to the state: +/// - Updates the L1 oracle contract bytecode to reflect the DA cost reduction. +/// - Sets the initial compression penalty threshold and penalty factor values. +/// - Sets the `isFeynman` slot to 1 (true). +pub(super) fn apply_feynman_hard_fork<DB: Database>( + state: &mut State<DB>, +) -> Result<(), DB::Error> { + // No-op if already applied. + // Note: This requires a storage read for every Feynman block, and it means this + // read needs to be included in the execution witness. Unfortunately, there is no + // other reliable way to apply the change only at the transition block, since + // `ScrollBlockExecutor` does not have access to the parent timestamp. + if state.storage(L1_GAS_PRICE_ORACLE_ADDRESS, GPO_IS_FEYNMAN_SLOT)? 
== IS_FEYNMAN { + return Ok(()) + } + + let oracle = state.load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS)?; + + // compute the code hash + let bytecode = Bytecode::new_raw(FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE); + let code_hash = bytecode.hash_slow(); + + // get the old oracle account info + let old_oracle_info = oracle.account_info().unwrap_or_default(); + + // init new oracle account information + let new_oracle_info = AccountInfo { code_hash, code: Some(bytecode), ..old_oracle_info }; + + // init new storage + let new_storage = FEYNMAN_L1_GAS_PRICE_ORACLE_STORAGE + .into_iter() + .map(|(slot, present_value)| { + ( + slot, + StorageSlot { + present_value, + previous_or_original_value: oracle.storage_slot(slot).unwrap_or_default(), + }, + ) + }) + .collect(); + + // create transition for oracle new account info and storage + let transition = oracle.change(new_oracle_info, new_storage); + + // add transition + if let Some(s) = state.transition_state.as_mut() { + s.add_transitions(vec![(L1_GAS_PRICE_ORACLE_ADDRESS, transition)]) + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::curie::CURIE_L1_GAS_PRICE_ORACLE_BYTECODE; + use revm::{ + database::{ + states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, + CacheDB, EmptyDB, State, + }, + primitives::{keccak256, U256}, + state::{AccountInfo, Bytecode}, + Database, + }; + use std::str::FromStr; + + #[test] + fn test_apply_feynman_fork() -> eyre::Result<()> { + // init state + let db = EmptyDB::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // oracle pre fork state + let bytecode_pre_fork = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); + let oracle_pre_fork = AccountInfo { + code_hash: bytecode_pre_fork.hash_slow(), + code: Some(bytecode_pre_fork), + ..Default::default() + }; + let oracle_storage_pre_fork = PlainStorage::from_iter([ + // owner + (GPO_OWNER_SLOT, U256::from_str("0x13d24a7ff6f5ec5ff0e9c40fc3b8c9c01c65437b")?), + (GPO_L1_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_OVERHEAD_SLOT, U256::from(0x38)), + (GPO_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_WHITELIST_SLOT, U256::from_str("0x5300000000000000000000000000000000000003")?), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_BLOB_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + ]); + state.insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + oracle_pre_fork.clone(), + oracle_storage_pre_fork.clone(), + ); + + // apply feynman fork + apply_feynman_hard_fork(&mut state)?; + + // merge transitions + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // check oracle account info + let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS).unwrap().clone(); + let code_hash = keccak256(&FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE); + let bytecode = Bytecode::new_raw(FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE); + let expected_oracle_info = + AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; + + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); + + // check oracle storage changeset + let mut storage = oracle.storage.into_iter().collect::<Vec<(U256, StorageSlot)>>(); + storage.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (got, expected) in storage.into_iter().zip(FEYNMAN_L1_GAS_PRICE_ORACLE_STORAGE) { + assert_eq!(got.0, expected.0); 
+ assert_eq!(got.1, StorageSlot { present_value: expected.1, ..Default::default() }); + } + + // check oracle original storage + for (slot, value) in oracle_storage_pre_fork { + assert_eq!(state.storage(L1_GAS_PRICE_ORACLE_ADDRESS, slot)?, value) + } + + // check deployed contract + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + + Ok(()) + } + + #[test] + fn test_apply_feynman_fork_only_once() -> eyre::Result<()> { + let bytecode = Bytecode::new_raw(FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE); + + let oracle_account = AccountInfo { + code_hash: bytecode.hash_slow(), + code: Some(bytecode), + ..Default::default() + }; + + let oracle_storage = PlainStorage::from_iter([ + (GPO_OWNER_SLOT, U256::from_str("0x13d24a7ff6f5ec5ff0e9c40fc3b8c9c01c65437b")?), + (GPO_L1_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_OVERHEAD_SLOT, U256::from(0x38)), + (GPO_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_WHITELIST_SLOT, U256::from_str("0x5300000000000000000000000000000000000003")?), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_BLOB_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(1_100_000_000u64)), + (GPO_PENALTY_FACTOR_SLOT, U256::from(3_000_000_000u64)), + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + ]); + + // init state, + // we write to db directly to make sure we do not have account storage in cache + let mut db = CacheDB::new(EmptyDB::default()); + + db.insert_account_info(L1_GAS_PRICE_ORACLE_ADDRESS, oracle_account); + + for (slot, value) in oracle_storage { + db.insert_account_storage(L1_GAS_PRICE_ORACLE_ADDRESS, slot, value).unwrap(); + } + + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // make sure account is in cache + state.load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS)?; + + // apply feynman fork + apply_feynman_hard_fork(&mut state)?; + + // merge transitions + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // isFeynman is already set, apply_feynman_hard_fork should be a no-op + assert_eq!(bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS), None); + + Ok(()) + } +}
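Both feynman.rs above and galileo_v2.rs below guard the upgrade with a marker-slot read, because the block executor cannot observe the parent timestamp and therefore cannot detect the exact transition block. The helper below is a hypothetical distillation of that pattern, not part of the crate's API; in the actual code the marker is `GPO_IS_FEYNMAN_SLOT` (resp. `GPO_IS_GALILEO_SLOT`) and the transition's storage update sets it to 1, so later blocks short-circuit after a single storage read.

use revm::{
    database::State,
    primitives::{Address, U256},
    Database,
};

/// Hypothetical helper: run `upgrade` at most once by checking a boolean
/// marker slot on `contract` before mutating the state.
fn apply_once<DB: Database>(
    state: &mut State<DB>,
    contract: Address,
    marker_slot: U256,
    upgrade: impl FnOnce(&mut State<DB>) -> Result<(), DB::Error>,
) -> Result<(), DB::Error> {
    // Marker already set: the transition ran on an earlier block, so this is a no-op.
    if state.storage(contract, marker_slot)? != U256::ZERO {
        return Ok(());
    }
    // First block past activation: apply the transition, which is expected
    // to set the marker slot as part of its storage changes.
    upgrade(state)
}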
diff --git reth/crates/scroll/alloy/evm/src/block/galileo_v2.rs scroll-reth/crates/scroll/alloy/evm/src/block/galileo_v2.rs new file mode 100644 index 0000000000000000000000000000000000000000..b48dce60ac9c3c401bee19e97b4be536f059edbd --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/block/galileo_v2.rs @@ -0,0 +1,222 @@ +//! `GalileoV2` fork transition for Scroll. + +use alloc::vec; +use revm::{ + bytecode::Bytecode, + database::{states::StorageSlot, State}, + primitives::{bytes, Bytes, U256}, + state::AccountInfo, + Database, +}; + +// Import L1GasPriceOracle address and slots. +use crate::gas_price_oracle::*; + +/// Bytecode of `L1GasPriceOracle` at `GalileoV2` transition. +/// Run these commands in the scroll-contracts repo to verify this bytecode: +/// +/// git checkout dfffa0f04bbd1de31ef342e1642a2f9ad9a620fe +/// yarn +/// forge build +/// cat artifacts/src/L1GasPriceOracle.sol/L1GasPriceOracle.json | jq -r .deployedBytecode.object +const GALILEO_V2_L1_GAS_PRICE_ORACLE_BYTECODE: Bytes = bytes!("608060405234801561000f575f80fd5b50600436106101c6575f3560e01c8063715018a6116100fe578063bede39b51161009e578063e88a60ad1161006e578063e88a60ad1461035d578063f2fde38b14610370578063f45e65d814610383578063fe5b04151461038c575f80fd5b8063bede39b51461031c578063c63b9e2d1461032f578063c91e514914610342578063de26c4a11461034a575f80fd5b80638da5cb5b116100d95780638da5cb5b146102c457806393e59dc1146102ee578063944b247f14610301578063a911d77f14610314575f80fd5b8063715018a6146102ab5780637f977cbf146102b357806384189161146102bb575f80fd5b80633d0f963e116101695780635471db39116101445780635471db391461027d5780636112d6db146102865780636a5e67e51461028f5780637046559714610298575f80fd5b80633d0f963e1461024e57806349948e0e14610261578063519b4bd314610274575f80fd5b806323e524ac116101a457806323e524ac146102105780633577afc51461021957806339455d3a1461022e5780633b7656bb14610241575f80fd5b80630c18c162146101ca5780630f337f6d146101e657806313dad5be14610203575b5f80fd5b6101d360025481565b6040519081526020015b60405180910390f35b600c546101f39060ff1681565b60405190151581526020016101dd565b6008546101f39060ff1681565b6101d360065481565b61022c610227366004610ccf565b610394565b005b61022c61023c366004610ce6565b610426565b600b546101f39060ff1681565b61022c61025c366004610d06565b610523565b6101d361026f366004610d47565b6105a6565b6101d360015481565b6101d360095481565b6101d3600a5481565b6101d360075481565b61022c6102a6366004610ccf565b6105f3565b61022c610681565b61022c6106b5565b6101d360055481565b5f546102d6906001600160a01b031681565b6040516001600160a01b0390911681526020016101dd565b6004546102d6906001600160a01b031681565b61022c61030f366004610ccf565b610711565b61022c61079d565b61022c61032a366004610ccf565b6107f9565b61022c61033d366004610ccf565b6108b6565b6009546101d3565b6101d3610358366004610d47565b610933565b61022c61036b366004610ccf565b61096a565b61022c61037e366004610d06565b6109f6565b6101d360035481565b61022c610a81565b5f546001600160a01b031633146103c65760405162461bcd60e51b81526004016103bd90610df2565b60405180910390fd5b621c9c388111156103ea57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561046d573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104919190610e29565b6104ae576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a160405181815
27f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b0316331461054c5760405162461bcd60e51b81526004016103bd90610df2565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f79101610517565b600c545f9060ff16156105c2576105bc82610add565b92915050565b600b5460ff16156105d6576105bc82610b55565b60085460ff16156105ea576105bc82610bb3565b6105bc82610bef565b5f546001600160a01b0316331461061c5760405162461bcd60e51b81526004016103bd90610df2565b61062c633b9aca006103e8610e5c565b81111561064c57604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a9060200161041b565b5f546001600160a01b031633146106aa5760405162461bcd60e51b81526004016103bd90610df2565b6106b35f610c20565b565b5f546001600160a01b031633146106de5760405162461bcd60e51b81526004016103bd90610df2565b600c5460ff16156107025760405163182389a760e01b815260040160405180910390fd5b600c805460ff19166001179055565b5f546001600160a01b0316331461073a5760405162461bcd60e51b81526004016103bd90610df2565b610748633b9aca0080610e5c565b8111156107685760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a1089060200161041b565b5f546001600160a01b031633146107c65760405162461bcd60e51b81526004016103bd90610df2565b60085460ff16156107ea576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610840573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906108649190610e29565b610881576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200161041b565b5f546001600160a01b031633146108df5760405162461bcd60e51b81526004016103bd90610df2565b805f036108fe5760405162ae184360e01b815260040160405180910390fd5b600a8190556040518181527f8647cebb7e57360673a28415c0bed2f68c42a86c5035f1c9b2eda2b09509288a9060200161041b565b600c545f9060ff16806109485750600b5460ff165b80610955575060085460ff165b1561096157505f919050565b6105bc82610c6f565b5f546001600160a01b031633146109935760405162461bcd60e51b81526004016103bd90610df2565b6109a1633b9aca0080610e5c565b8111156109c15760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa9060200161041b565b5f546001600160a01b03163314610a1f5760405162461bcd60e51b81526004016103bd90610df2565b6001600160a01b038116610a755760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016103bd565b610a7e81610c20565b50565b5f546001600160a01b03163314610aaa5760405162461bcd60e51b81526004016103bd90610df2565b600b5460ff1615610ace57604051631a7c228b60e21b815260040160405180910390fd5b600b805460ff19166001179055565b5f808251600554600754610af19190610e5c565b600154600654610b019190610e5c565b610b0b9190610e73565b610b159190610e5c565b90505f600a54845183610b289190610e5c565b610b329190610e86565b9050633b9aca00610b438284610e73565b610b4d9190610e86565b949350505050565b5f633b9aca0080600a548451600554600754610b719190610e5c565b600154600654610b819190610e5c565b610b8b9190610e73565b610b959190610e5c565b610b9f9190610e5c565b610ba99190610e86565b6105bc9190610e86565b5f633b9aca006005548351600754610bcb9190610e5c565b610bd59190610e5c565b600154600654610be59190610e
5c565b610ba99190610e73565b5f80610bfa83610c6f565b90505f60015482610c0b9190610e5c565b9050633b9aca0060035482610b439190610e5c565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b81811015610cc057848181518110610c9157610c91610ea5565b01602001516001600160f81b0319165f03610cb157600483019250610cb8565b6010830192505b600101610c77565b50506002540160400192915050565b5f60208284031215610cdf575f80fd5b5035919050565b5f8060408385031215610cf7575f80fd5b50508035926020909101359150565b5f60208284031215610d16575f80fd5b81356001600160a01b0381168114610d2c575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610d57575f80fd5b813567ffffffffffffffff80821115610d6e575f80fd5b818401915084601f830112610d81575f80fd5b813581811115610d9357610d93610d33565b604051601f8201601f19908116603f01168101908382118183101715610dbb57610dbb610d33565b81604052828152876020848701011115610dd3575f80fd5b826020860160208301375f928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610e39575f80fd5b81518015158114610d2c575f80fd5b634e487b7160e01b5f52601160045260245ffd5b80820281158282048414176105bc576105bc610e48565b808201808211156105bc576105bc610e48565b5f82610ea057634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a"); + +/// Galileo slot is set to 1 (true) after the `GalileoV2` block fork. +const IS_GALILEO: U256 = U256::from_limbs([1, 0, 0, 0]); + +/// Storage update of L1 gas price oracle at `GalileoV2` transition. +const GALILEO_V2_L1_GAS_PRICE_ORACLE_STORAGE: [(U256, U256); 1] = + [(GPO_IS_GALILEO_SLOT, IS_GALILEO)]; + +/// Applies the Scroll `GalileoV2` hard fork to the state: +/// - Updates the L1 oracle contract bytecode. +/// - Sets the `isGalileo` slot to 1 (true). +pub(super) fn apply_galileo_v2_hard_fork<DB: Database>( + state: &mut State<DB>, +) -> Result<(), DB::Error> { + // No-op if already applied. + // Note: This requires a storage read for every block after `GalileoV2`, and it means this + // read needs to be included in the execution witness. Unfortunately, there is no + // other reliable way to apply the change only at the transition block, since + // `ScrollBlockExecutor` does not have access to the parent timestamp. + if state.storage(L1_GAS_PRICE_ORACLE_ADDRESS, GPO_IS_GALILEO_SLOT)? 
== IS_GALILEO { + return Ok(()) + } + + let oracle = state.load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS)?; + + // compute the code hash + let bytecode = Bytecode::new_raw(GALILEO_V2_L1_GAS_PRICE_ORACLE_BYTECODE); + let code_hash = bytecode.hash_slow(); + + // get the old oracle account info + let old_oracle_info = oracle.account_info().unwrap_or_default(); + + // init new oracle account information + let new_oracle_info = AccountInfo { code_hash, code: Some(bytecode), ..old_oracle_info }; + + // init new storage + let new_storage = GALILEO_V2_L1_GAS_PRICE_ORACLE_STORAGE + .into_iter() + .map(|(slot, present_value)| { + ( + slot, + StorageSlot { + present_value, + previous_or_original_value: oracle.storage_slot(slot).unwrap_or_default(), + }, + ) + }) + .collect(); + + // create transition for oracle new account info and storage + let transition = oracle.change(new_oracle_info, new_storage); + + // add transition + if let Some(s) = state.transition_state.as_mut() { + s.add_transitions(vec![(L1_GAS_PRICE_ORACLE_ADDRESS, transition)]) + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::feynman::FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE; + use revm::{ + database::{ + states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, + CacheDB, EmptyDB, State, + }, + primitives::{keccak256, U256}, + state::{AccountInfo, Bytecode}, + Database, + }; + use std::str::FromStr; + + #[test] + fn test_apply_galileo_v2_fork() -> eyre::Result<()> { + // init state + let db = EmptyDB::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // oracle pre fork state + let bytecode_pre_fork = Bytecode::new_raw(FEYNMAN_L1_GAS_PRICE_ORACLE_BYTECODE); + let oracle_pre_fork = AccountInfo { + code_hash: bytecode_pre_fork.hash_slow(), + code: Some(bytecode_pre_fork), + ..Default::default() + }; + let oracle_storage_pre_fork = PlainStorage::from_iter([ + (GPO_OWNER_SLOT, U256::from_str("0x13d24a7ff6f5ec5ff0e9c40fc3b8c9c01c65437b")?), + (GPO_L1_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_OVERHEAD_SLOT, U256::from(0x38)), + (GPO_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_WHITELIST_SLOT, U256::from_str("0x5300000000000000000000000000000000000003")?), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_BLOB_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(1_000_000_000)), + (GPO_PENALTY_FACTOR_SLOT, U256::from(1_000_000_000)), + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + ]); + state.insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + oracle_pre_fork.clone(), + oracle_storage_pre_fork.clone(), + ); + + // apply GalileoV2 fork + apply_galileo_v2_hard_fork(&mut state)?; + + // merge transitions + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // check oracle account info + let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS).unwrap().clone(); + let code_hash = keccak256(&GALILEO_V2_L1_GAS_PRICE_ORACLE_BYTECODE); + let bytecode = Bytecode::new_raw(GALILEO_V2_L1_GAS_PRICE_ORACLE_BYTECODE); + let expected_oracle_info = + AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; + + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); + + // check oracle storage changeset + let mut storage = oracle.storage.into_iter().collect::<Vec<(U256, 
StorageSlot)>>(); + storage.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (got, expected) in storage.into_iter().zip(GALILEO_V2_L1_GAS_PRICE_ORACLE_STORAGE) { + assert_eq!(got.0, expected.0); + assert_eq!(got.1, StorageSlot { present_value: expected.1, ..Default::default() }); + } + + // check oracle original storage + for (slot, value) in oracle_storage_pre_fork { + assert_eq!(state.storage(L1_GAS_PRICE_ORACLE_ADDRESS, slot)?, value) + } + + // check deployed contract + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + + Ok(()) + } + + #[test] + fn test_apply_galileo_v2_fork_only_once() -> eyre::Result<()> { + let bytecode = Bytecode::new_raw(GALILEO_V2_L1_GAS_PRICE_ORACLE_BYTECODE); + + let oracle_account = AccountInfo { + code_hash: bytecode.hash_slow(), + code: Some(bytecode), + ..Default::default() + }; + + let oracle_storage = PlainStorage::from_iter([ + (GPO_OWNER_SLOT, U256::from_str("0x13d24a7ff6f5ec5ff0e9c40fc3b8c9c01c65437b")?), + (GPO_L1_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_OVERHEAD_SLOT, U256::from(0x38)), + (GPO_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_WHITELIST_SLOT, U256::from_str("0x5300000000000000000000000000000000000003")?), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(0x15f50e5e)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_BLOB_SCALAR_SLOT, U256::from(0x3e95ba80)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(1_100_000_000u64)), + (GPO_PENALTY_FACTOR_SLOT, U256::from(3_000_000_000u64)), + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + (GPO_IS_GALILEO_SLOT, U256::from(1)), + ]); + + // init state, + // we write to db directly to make sure we do not have account storage in cache + let mut db = CacheDB::new(EmptyDB::default()); + + db.insert_account_info(L1_GAS_PRICE_ORACLE_ADDRESS, oracle_account); + + for (slot, value) in oracle_storage { + db.insert_account_storage(L1_GAS_PRICE_ORACLE_ADDRESS, slot, value).unwrap(); + } + + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // make sure account is in cache + state.load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS)?; + + // apply GalileoV2 fork + apply_galileo_v2_hard_fork(&mut state)?; + + // merge transitions + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // isGalileo is already set, apply_galileo_v2_hard_fork should be a no-op + assert_eq!(bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS), None); + + Ok(()) + } +}
diff --git reth/crates/scroll/alloy/evm/src/block/mod.rs scroll-reth/crates/scroll/alloy/evm/src/block/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c7315c9a9710b7efed1f9f0e10f2b81d4ed9542 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/block/mod.rs @@ -0,0 +1,433 @@ +pub mod curie; +pub mod feynman; +pub mod galileo_v2; + +pub use receipt_builder::{ReceiptBuilderCtx, ScrollReceiptBuilder}; +mod receipt_builder; + +use crate::{ + block::{ + curie::apply_curie_hard_fork, feynman::apply_feynman_hard_fork, + galileo_v2::apply_galileo_v2_hard_fork, + }, + gas_price_oracle::L1_GAS_PRICE_ORACLE_ADDRESS, + system_caller::ScrollSystemCaller, + FromTxWithCompressionInfo, ScrollDefaultPrecompilesFactory, ScrollEvm, ScrollEvmFactory, + ScrollPrecompilesFactory, ScrollTransactionIntoTxEnv, ToTxWithCompressionInfo, +}; +use alloc::{boxed::Box, format, vec::Vec}; + +use alloy_consensus::{Transaction, TxReceipt, Typed2718}; +use alloy_eips::Encodable2718; +use alloy_evm::{ + block::{ + BlockExecutionError, BlockExecutionResult, BlockExecutor, BlockExecutorFactory, + BlockExecutorFor, BlockValidationError, ExecutableTx, OnStateHook, + }, + Database, Evm, EvmFactory, FromRecoveredTx, FromTxWithEncoded, +}; +use alloy_primitives::{B256, U256}; +use revm::{ + context::{ + result::{InvalidTransaction, ResultAndState}, + Block, TxEnv, + }, + database::State, + handler::PrecompileProvider, + interpreter::InterpreterResult, + DatabaseCommit, Inspector, +}; +use revm_scroll::builder::ScrollContext; +use scroll_alloy_consensus::L1_MESSAGE_TRANSACTION_TYPE; +use scroll_alloy_hardforks::{ScrollHardfork, ScrollHardforks}; + +/// Compression info is a pair of (compression ratio, compressed size). +pub type ScrollTxCompressionInfo = (U256, usize); + +/// A cache for transaction compression infos, i.e. (compression ratio, compressed size) pairs. +pub type ScrollTxCompressionInfos = Vec<ScrollTxCompressionInfo>; + +/// Context for Scroll Block Execution. +#[derive(Debug, Default, Clone)] +pub struct ScrollBlockExecutionCtx { + /// Parent block hash. + pub parent_hash: B256, +} + +/// Block executor for Scroll. +#[derive(Debug)] +pub struct ScrollBlockExecutor<Evm, R: ScrollReceiptBuilder, Spec> { + /// Spec. + spec: Spec, + /// Receipt builder. + receipt_builder: R, + /// The EVM used by executor. + evm: Evm, + /// Context for block execution. + ctx: ScrollBlockExecutionCtx, + /// Receipts of executed transactions. + receipts: Vec<R::Receipt>, + /// Total gas used by executed transactions. + gas_used: u64, + /// Utility to call system smart contracts. + system_caller: ScrollSystemCaller<Spec>, +} + +impl<E, R: ScrollReceiptBuilder, Spec> ScrollBlockExecutor<E, R, Spec> { + /// Returns the spec for [`ScrollBlockExecutor`]. + pub const fn spec(&self) -> &Spec { + &self.spec + } +} + +impl<E, R, Spec> ScrollBlockExecutor<E, R, Spec> +where + E: EvmExt, + R: ScrollReceiptBuilder, + Spec: ScrollHardforks + Clone, +{ + /// Creates a new [`ScrollBlockExecutor`]. 
+ pub fn new(evm: E, ctx: ScrollBlockExecutionCtx, spec: Spec, receipt_builder: R) -> Self { + Self { + evm, + ctx, + system_caller: ScrollSystemCaller::new(spec.clone()), + spec, + receipt_builder, + receipts: Vec::new(), + gas_used: 0, + } + } +} + +impl<'db, DB, E, R, Spec> ScrollBlockExecutor<E, R, Spec> +where + DB: Database + 'db, + E: EvmExt< + DB = &'db mut State<DB>, + Tx: FromRecoveredTx<R::Transaction> + + FromTxWithEncoded<R::Transaction> + + FromTxWithCompressionInfo<R::Transaction>, + >, + R: ScrollReceiptBuilder<Transaction: Transaction + Encodable2718, Receipt: TxReceipt>, + Spec: ScrollHardforks, +{ + /// Executes all transactions in a block, applying pre and post execution changes. The provided + /// transaction compression infos are expected to be in the same order as the + /// transactions. + pub fn execute_block_with_compression_cache( + mut self, + transactions: impl IntoIterator< + Item = impl ExecutableTx<Self> + + ToTxWithCompressionInfo<<Self as BlockExecutor>::Transaction>, + >, + compression_infos: impl IntoIterator<Item = ScrollTxCompressionInfo>, + ) -> Result<BlockExecutionResult<R::Receipt>, BlockExecutionError> + where + Self: Sized, + { + self.apply_pre_execution_changes()?; + + for (tx, (compression_ratio, compressed_size)) in + transactions.into_iter().zip(compression_infos.into_iter()) + { + let tx = tx.with_compression_info(compression_ratio, compressed_size); + self.execute_transaction(&tx)?; + } + + self.apply_post_execution_changes() + } +} + +impl<'db, DB, E, R, Spec> BlockExecutor for ScrollBlockExecutor<E, R, Spec> +where + DB: Database + 'db, + E: EvmExt< + DB = &'db mut State<DB>, + Tx: FromRecoveredTx<R::Transaction> + FromTxWithEncoded<R::Transaction>, + >, + R: ScrollReceiptBuilder<Transaction: Transaction + Encodable2718, Receipt: TxReceipt>, + Spec: ScrollHardforks, +{ + type Transaction = R::Transaction; + type Receipt = R::Receipt; + type Evm = E; + + fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { + // set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + self.spec.is_spurious_dragon_active_at_block(self.evm.block().number().to()); + self.evm.db_mut().set_state_clear_flag(state_clear_flag); + + // load the l1 gas oracle contract in cache. + let _ = self + .evm + .db_mut() + .load_cache_account(L1_GAS_PRICE_ORACLE_ADDRESS) + .map_err(BlockExecutionError::other)?; + + // apply gas oracle predeploy upgrade at Curie transition block. + #[allow(clippy::collapsible_if)] + if self + .spec + .scroll_fork_activation(ScrollHardfork::Curie) + .transitions_at_block(self.evm.block().number().to()) + { + if let Err(err) = apply_curie_hard_fork(self.evm.db_mut()) { + return Err(BlockExecutionError::msg(format!( + "error occurred at Curie fork: {err:?}" + ))); + }; + } + + // apply gas oracle predeploy upgrade at Feynman transition block. + #[allow(clippy::collapsible_if)] + if self + .spec + .scroll_fork_activation(ScrollHardfork::Feynman) + .active_at_timestamp(self.evm.block().timestamp().to()) + { + if let Err(err) = apply_feynman_hard_fork(self.evm.db_mut()) { + return Err(BlockExecutionError::msg(format!( + "error occurred at Feynman fork: {err:?}" + ))); + }; + } + + // apply gas oracle predeploy upgrade at GalileoV2 transition block. 
+ #[allow(clippy::collapsible_if)] + if self + .spec + .scroll_fork_activation(ScrollHardfork::GalileoV2) + .active_at_timestamp(self.evm.block().timestamp().to()) + { + if let Err(err) = apply_galileo_v2_hard_fork(self.evm.db_mut()) { + return Err(BlockExecutionError::msg(format!( + "error occurred at GalileoV2 fork: {err:?}" + ))); + }; + } + + // apply eip-2935. + self.system_caller.apply_blockhashes_contract_call(self.ctx.parent_hash, &mut self.evm)?; + + Ok(()) + } + + fn execute_transaction_without_commit( + &mut self, + tx: impl ExecutableTx<Self>, + ) -> Result<ResultAndState<<Self::Evm as Evm>::HaltReason>, BlockExecutionError> { + let chain_spec = &self.spec; + let is_l1_message = tx.tx().ty() == L1_MESSAGE_TRANSACTION_TYPE; + // The sum of the transaction’s gas limit and the gas utilized in this block prior, + // must be no greater than the block’s gasLimit. + let block_available_gas = self.evm.block().gas_limit() - self.gas_used; + if tx.tx().gas_limit() > block_available_gas { + return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: tx.tx().gas_limit(), + block_available_gas, + } + .into()) + } + + let hash = tx.tx().trie_hash(); + + let block = self.evm.block(); + // verify the transaction type is accepted by the current fork. + if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number().to()) { + return Err(BlockValidationError::InvalidTx { + hash, + error: Box::new(InvalidTransaction::Eip2930NotSupported), + } + .into()) + } + if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number().to()) { + return Err(BlockValidationError::InvalidTx { + hash, + error: Box::new(InvalidTransaction::Eip1559NotSupported), + } + .into()) + } + if tx.tx().is_eip4844() { + return Err(BlockValidationError::InvalidTx { + hash, + error: Box::new(InvalidTransaction::Eip4844NotSupported), + } + .into()) + } + if tx.tx().is_eip7702() && + !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp().to()) + { + return Err(BlockValidationError::InvalidTx { + hash, + error: Box::new(InvalidTransaction::Eip7702NotSupported), + } + .into()) + } + + // disable the base fee and nonce checks for l1 messages. + self.evm.with_base_fee_check(!is_l1_message); + self.evm.with_nonce_check(!is_l1_message); + + // execute and return the result. 
+ self.evm.transact(&tx).map_err(move |err| BlockExecutionError::evm(err, hash)) + } + + fn commit_transaction( + &mut self, + output: ResultAndState<<Self::Evm as Evm>::HaltReason>, + tx: impl ExecutableTx<Self>, + ) -> Result<u64, BlockExecutionError> { + let ResultAndState { result, state } = output; + let is_l1_message = tx.tx().ty() == L1_MESSAGE_TRANSACTION_TYPE; + + let l1_fee = if is_l1_message { + U256::ZERO + } else { + // compute l1 fee for all non-l1 transaction + self.evm.l1_fee().expect("l1 fee loaded") + }; + + let gas_used = result.gas_used(); + self.gas_used += gas_used; + + let ctx = ReceiptBuilderCtx::<'_, Self::Transaction, E> { + tx: tx.tx(), + result, + cumulative_gas_used: self.gas_used, + l1_fee, + }; + self.receipts.push(self.receipt_builder.build_receipt(ctx)); + + self.evm.db_mut().commit(state); + + Ok(gas_used) + } + + fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<R::Receipt>), BlockExecutionError> { + Ok(( + self.evm, + BlockExecutionResult { + receipts: self.receipts, + requests: Default::default(), + gas_used: self.gas_used, + blob_gas_used: 0, + }, + )) + } + + fn set_state_hook(&mut self, _hook: Option<Box<dyn OnStateHook>>) {} + + fn evm_mut(&mut self) -> &mut Self::Evm { + &mut self.evm + } + + fn evm(&self) -> &Self::Evm { + &self.evm + } +} + +/// An extension of the [`Evm`] trait for Scroll. +pub trait EvmExt: Evm { + /// Sets whether the evm should enable or disable the base fee checks. + fn with_base_fee_check(&mut self, enabled: bool); + /// Sets whether the evm should enable or disable the nonce checks. + fn with_nonce_check(&mut self, enabled: bool); + /// Returns the l1 fee for the transaction. + fn l1_fee(&self) -> Option<U256>; +} + +impl<DB, I, P> EvmExt for ScrollEvm<DB, I, P> +where + DB: Database, + I: Inspector<ScrollContext<DB>>, + P: PrecompileProvider<ScrollContext<DB>, Output = InterpreterResult>, +{ + fn with_base_fee_check(&mut self, enabled: bool) { + self.ctx_mut().cfg.disable_base_fee = !enabled; + } + + fn with_nonce_check(&mut self, enabled: bool) { + self.ctx_mut().cfg.disable_nonce_check = !enabled; + } + + fn l1_fee(&self) -> Option<U256> { + let l1_block_info = &self.ctx().chain; + let transaction_rlp_bytes = self.ctx().tx.rlp_bytes.as_ref()?; + let compression_ratio = self.ctx().tx.compression_ratio; + let compressed_size = self.ctx().tx.compressed_size; + Some(l1_block_info.calculate_tx_l1_cost( + transaction_rlp_bytes, + self.ctx().cfg.spec, + compression_ratio, + compressed_size, + )) + } +} + +/// Scroll block executor factory. +#[derive(Debug, Clone, Default, Copy)] +pub struct ScrollBlockExecutorFactory<R, Spec = ScrollHardfork, P = ScrollDefaultPrecompilesFactory> +{ + /// Receipt builder. + receipt_builder: R, + /// Chain specification. + spec: Spec, + /// EVM factory. + evm_factory: ScrollEvmFactory<P>, +} + +impl<R, Spec, P> ScrollBlockExecutorFactory<R, Spec, P> { + /// Creates a new [`ScrollBlockExecutorFactory`] with the given receipt builder, spec and + /// factory. + pub const fn new(receipt_builder: R, spec: Spec, evm_factory: ScrollEvmFactory<P>) -> Self { + Self { receipt_builder, spec, evm_factory } + } + + /// Exposes the receipt builder. + pub const fn receipt_builder(&self) -> &R { + &self.receipt_builder + } + + /// Exposes the chain specification. + pub const fn spec(&self) -> &Spec { + &self.spec + } + + /// Exposes the EVM factory. 
+ pub const fn evm_factory(&self) -> &ScrollEvmFactory<P> { + &self.evm_factory + } +} + +impl<R, Spec, P> BlockExecutorFactory for ScrollBlockExecutorFactory<R, Spec, P> +where + R: ScrollReceiptBuilder<Transaction: Transaction + Encodable2718, Receipt: TxReceipt>, + Spec: ScrollHardforks, + P: ScrollPrecompilesFactory, + ScrollTransactionIntoTxEnv<TxEnv>: + FromRecoveredTx<R::Transaction> + FromTxWithEncoded<R::Transaction>, + Self: 'static, +{ + type EvmFactory = ScrollEvmFactory<P>; + type ExecutionCtx<'a> = ScrollBlockExecutionCtx; + type Transaction = R::Transaction; + type Receipt = R::Receipt; + + fn evm_factory(&self) -> &Self::EvmFactory { + &self.evm_factory + } + + fn create_executor<'a, DB, I>( + &'a self, + evm: <Self::EvmFactory as EvmFactory>::Evm<&'a mut State<DB>, I>, + ctx: Self::ExecutionCtx<'a>, + ) -> impl BlockExecutorFor<'a, Self, DB, I> + where + DB: Database + 'a, + I: Inspector<<Self::EvmFactory as EvmFactory>::Context<&'a mut State<DB>>> + 'a, + { + ScrollBlockExecutor::new(evm, ctx, &self.spec, &self.receipt_builder) + } +}
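The executor above gates transaction types on the active Scroll fork: EIP-2930 and EIP-1559 transactions require Curie (a block-number fork), EIP-7702 requires EuclidV2 (a timestamp fork), EIP-4844 is never accepted, and L1 messages additionally skip the base-fee and nonce checks and pay no L1 fee. A standalone sketch of that gating logic, using simplified stand-in types rather than the crate's real `ScrollTxEnvelope` and hardfork APIs:

    // Standalone sketch of the per-transaction fork checks in
    // `execute_transaction_without_commit`. `TxType` and `Forks` are simplified
    // stand-ins, not the crate's real types.
    #[derive(Clone, Copy)]
    enum TxType {
        Legacy,
        Eip2930,
        Eip1559,
        Eip4844,
        Eip7702,
        L1Message,
    }

    #[derive(Clone, Copy)]
    struct Forks {
        curie_active: bool,     // Curie activates by block number
        euclid_v2_active: bool, // EuclidV2 activates by timestamp
    }

    /// Returns an error if the transaction type is not accepted by the active forks.
    fn check_tx_type(ty: TxType, forks: Forks) -> Result<(), &'static str> {
        match ty {
            TxType::Eip2930 if !forks.curie_active => Err("EIP-2930 not supported before Curie"),
            TxType::Eip1559 if !forks.curie_active => Err("EIP-1559 not supported before Curie"),
            TxType::Eip4844 => Err("EIP-4844 is never supported"),
            TxType::Eip7702 if !forks.euclid_v2_active => {
                Err("EIP-7702 not supported before EuclidV2")
            }
            _ => Ok(()),
        }
    }

    fn main() {
        let pre_curie = Forks { curie_active: false, euclid_v2_active: false };
        assert!(check_tx_type(TxType::Legacy, pre_curie).is_ok());
        assert!(check_tx_type(TxType::Eip2930, pre_curie).is_err());
        assert!(check_tx_type(TxType::Eip1559, pre_curie).is_err());
        assert!(check_tx_type(TxType::L1Message, pre_curie).is_ok());

        let post_euclid_v2 = Forks { curie_active: true, euclid_v2_active: true };
        assert!(check_tx_type(TxType::Eip7702, post_euclid_v2).is_ok());
        assert!(check_tx_type(TxType::Eip4844, post_euclid_v2).is_err());
    }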
diff --git reth/crates/scroll/alloy/evm/src/block/receipt_builder.rs scroll-reth/crates/scroll/alloy/evm/src/block/receipt_builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..eab12b25ba75b95c9f7cbcaeff5f1119cebeacee --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/block/receipt_builder.rs @@ -0,0 +1,32 @@ +use alloy_evm::Evm; +use alloy_primitives::U256; +use core::fmt::Debug; +use revm::context::result::ExecutionResult; + +/// Context for building a receipt. +#[derive(Debug)] +pub struct ReceiptBuilderCtx<'a, T, E: Evm> { + /// Transaction + pub tx: &'a T, + /// Result of transaction execution. + pub result: ExecutionResult<E::HaltReason>, + /// Cumulative gas used. + pub cumulative_gas_used: u64, + /// L1 fee. + pub l1_fee: U256, +} + +/// Type that knows how to build a receipt based on execution result. +#[auto_impl::auto_impl(&, Arc)] +pub trait ScrollReceiptBuilder: Debug { + /// Transaction type. + type Transaction; + /// Receipt type. + type Receipt; + + /// Builds a receipt given a transaction and the result of the execution. + fn build_receipt<'a, E: Evm>( + &self, + ctx: ReceiptBuilderCtx<'a, Self::Transaction, E>, + ) -> Self::Receipt; +}
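A minimal sketch of a custom receipt builder, assuming the crate is imported as `scroll_alloy_evm` and reusing `ScrollTxEnvelope` from `scroll-alloy-consensus`; `MyReceipt` and `MyReceiptBuilder` are hypothetical names. It shows the one place where the Scroll-specific `l1_fee` from the execution context ends up in a receipt:

    // A hypothetical receipt builder; `MyReceipt` is not a real crate type.
    use alloy_evm::Evm;
    use alloy_primitives::U256;
    use scroll_alloy_consensus::ScrollTxEnvelope;
    use scroll_alloy_evm::{ReceiptBuilderCtx, ScrollReceiptBuilder};

    /// A pared-down receipt keeping the Scroll-specific L1 fee alongside the usual fields.
    #[derive(Debug, Clone)]
    struct MyReceipt {
        success: bool,
        cumulative_gas_used: u64,
        l1_fee: U256,
    }

    #[derive(Debug, Clone, Copy, Default)]
    struct MyReceiptBuilder;

    impl ScrollReceiptBuilder for MyReceiptBuilder {
        type Transaction = ScrollTxEnvelope;
        type Receipt = MyReceipt;

        fn build_receipt<'a, E: Evm>(
            &self,
            ctx: ReceiptBuilderCtx<'a, Self::Transaction, E>,
        ) -> Self::Receipt {
            MyReceipt {
                success: ctx.result.is_success(),
                cumulative_gas_used: ctx.cumulative_gas_used,
                l1_fee: ctx.l1_fee,
            }
        }
    }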
diff --git reth/crates/scroll/alloy/evm/src/gas_price_oracle.rs scroll-reth/crates/scroll/alloy/evm/src/gas_price_oracle.rs new file mode 100644 index 0000000000000000000000000000000000000000..6a8ec82b42db317b1e9946a7d0d0a3084eb2ca7c --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/gas_price_oracle.rs @@ -0,0 +1,88 @@ +//! L1 Gas Price Oracle constants. + +use revm::primitives::{address, Address, U256}; + +/// L1 gas price oracle address. +/// <https://scrollscan.com/address/0x5300000000000000000000000000000000000002> +pub const L1_GAS_PRICE_ORACLE_ADDRESS: Address = + address!("5300000000000000000000000000000000000002"); + +// forge inspect src/L2/predeploys/L1GasPriceOracle.sol:L1GasPriceOracle storageLayout +// ╭------------------+---------------------+------+--------+-------╮ +// | Name | Type | Slot | Offset | Bytes | +// +================================================================+ +// | owner | address | 0 | 0 | 20 | +// |------------------+---------------------+------+--------+-------+ +// | l1BaseFee | uint256 | 1 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | overhead | uint256 | 2 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | scalar | uint256 | 3 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | whitelist | contract IWhitelist | 4 | 0 | 20 | +// |------------------+---------------------+------+--------+-------+ +// | l1BlobBaseFee | uint256 | 5 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | commitScalar | uint256 | 6 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | blobScalar | uint256 | 7 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | isCurie | bool | 8 | 0 | 1 | +// |------------------+---------------------+------+--------+-------+ +// | penaltyThreshold | uint256 | 9 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | penaltyFactor | uint256 | 10 | 0 | 32 | +// |------------------+---------------------+------+--------+-------+ +// | isFeynman | bool | 11 | 0 | 1 | +// |------------------+---------------------+------+--------+-------+ +// | __gap | uint248 | 11 | 1 | 31 | +// |------------------+---------------------+------+--------+-------+ +// | isGalileo | bool | 12 | 0 | 1 | +// ╰------------------+---------------------+------+--------+-------╯ + +/// Storage slot for `owner` in the `L1GasPriceOracle` contract. +pub const GPO_OWNER_SLOT: U256 = U256::from_limbs([0, 0, 0, 0]); + +/// Storage slot for `l1BaseFee` in the `L1GasPriceOracle` contract. +pub const GPO_L1_BASE_FEE_SLOT: U256 = U256::from_limbs([1, 0, 0, 0]); + +/// Storage slot for `overhead` in the `L1GasPriceOracle` contract. +pub const GPO_OVERHEAD_SLOT: U256 = U256::from_limbs([2, 0, 0, 0]); + +/// Storage slot for `scalar` in the `L1GasPriceOracle` contract. +pub const GPO_SCALAR_SLOT: U256 = U256::from_limbs([3, 0, 0, 0]); + +/// Storage slot for `whitelist` in the `L1GasPriceOracle` contract. +pub const GPO_WHITELIST_SLOT: U256 = U256::from_limbs([4, 0, 0, 0]); + +/// Storage slot for `blobBaseFee` in the `L1GasPriceOracle` contract. +/// Added in the Curie fork. +pub const GPO_L1_BLOB_BASE_FEE_SLOT: U256 = U256::from_limbs([5, 0, 0, 0]); + +/// Storage slot for `commitScalar` in the `L1GasPriceOracle` contract. +/// Added in the Curie fork. 
+pub const GPO_COMMIT_SCALAR_SLOT: U256 = U256::from_limbs([6, 0, 0, 0]); + +/// Storage slot for `blobScalar` in the `L1GasPriceOracle` contract. +/// Added in the Curie fork. +pub const GPO_BLOB_SCALAR_SLOT: U256 = U256::from_limbs([7, 0, 0, 0]); + +/// Storage slot for `isCurie` in the `L1GasPriceOracle` contract. +/// Added in the Curie fork. +pub const GPO_IS_CURIE_SLOT: U256 = U256::from_limbs([8, 0, 0, 0]); + +/// Storage slot for `penaltyThreshold` in the `L1GasPriceOracle` contract. +/// Added in the Feynman fork. +pub const GPO_PENALTY_THRESHOLD_SLOT: U256 = U256::from_limbs([9, 0, 0, 0]); + +/// Storage slot for `penaltyFactor` in the `L1GasPriceOracle` contract. +/// Added in the Feynman fork. +pub const GPO_PENALTY_FACTOR_SLOT: U256 = U256::from_limbs([10, 0, 0, 0]); + +/// Storage slot for `isFeynman` in the `L1GasPriceOracle` contract. +/// Added in the Feynman fork. +pub const GPO_IS_FEYNMAN_SLOT: U256 = U256::from_limbs([11, 0, 0, 0]); + +/// Storage slot for `isGalileo` in the `L1GasPriceOracle` contract. +/// Added in the Galileo fork. +pub const GPO_IS_GALILEO_SLOT: U256 = U256::from_limbs([12, 0, 0, 0]);
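These constants are built with `U256::from_limbs`, which takes little-endian 64-bit limbs, so `[n, 0, 0, 0]` is just slot `n` of the storage layout table above in const-friendly form. A minimal sketch of reading oracle values with them, assuming the crate is imported as `scroll_alloy_evm`; `read_l1_fee_params` is a hypothetical helper:

    // `read_l1_fee_params` is a hypothetical helper, not part of the crate.
    use revm::{primitives::U256, Database};
    use scroll_alloy_evm::gas_price_oracle::{
        GPO_L1_BASE_FEE_SLOT, GPO_L1_BLOB_BASE_FEE_SLOT, L1_GAS_PRICE_ORACLE_ADDRESS,
    };

    /// Reads the oracle's `l1BaseFee` (slot 1) and `l1BlobBaseFee` (slot 5) from a revm database.
    fn read_l1_fee_params<DB: Database>(db: &mut DB) -> Result<(U256, U256), DB::Error> {
        let l1_base_fee = db.storage(L1_GAS_PRICE_ORACLE_ADDRESS, GPO_L1_BASE_FEE_SLOT)?;
        let l1_blob_base_fee = db.storage(L1_GAS_PRICE_ORACLE_ADDRESS, GPO_L1_BLOB_BASE_FEE_SLOT)?;
        Ok((l1_base_fee, l1_blob_base_fee))
    }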
diff --git reth/crates/scroll/alloy/evm/src/lib.rs scroll-reth/crates/scroll/alloy/evm/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..7f1a9238e8740d9d6c6581ea51fb66cd469b7ddf --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/lib.rs @@ -0,0 +1,270 @@ +//! Alloy Evm API for Scroll. + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +mod block; +pub use block::{ + curie, feynman, galileo_v2, EvmExt, ReceiptBuilderCtx, ScrollBlockExecutionCtx, + ScrollBlockExecutor, ScrollBlockExecutorFactory, ScrollReceiptBuilder, ScrollTxCompressionInfo, + ScrollTxCompressionInfos, +}; + +pub mod gas_price_oracle; + +mod tx; +pub use tx::{ + compute_compressed_size, compute_compression_ratio, FromTxWithCompressionInfo, + ScrollTransactionIntoTxEnv, ToTxWithCompressionInfo, WithCompressionInfo, +}; + +mod system_caller; + +extern crate alloc; + +use alloy_evm::{precompiles::PrecompilesMap, Database, Evm, EvmEnv, EvmFactory}; +use alloy_primitives::{Address, Bytes}; +use core::{ + fmt, + ops::{Deref, DerefMut}, +}; +use revm::{ + context::{result::HaltReason, BlockEnv, TxEnv}, + context_interface::result::{EVMError, ResultAndState}, + handler::PrecompileProvider, + inspector::NoOpInspector, + interpreter::{interpreter::EthInterpreter, InterpreterResult}, + Context, ExecuteEvm, InspectEvm, Inspector, SystemCallEvm, +}; +use revm_scroll::{ + builder::{ + DefaultScrollContext, EuclidEipActivations, FeynmanEipActivations, ScrollBuilder, + ScrollContext, + }, + instructions::ScrollInstructions, + precompile::ScrollPrecompileProvider, + ScrollSpecId, +}; + +/// Re-export `TX_L1_FEE_PRECISION_U256` from `revm-scroll` for convenience. +pub use revm_scroll::l1block::TX_L1_FEE_PRECISION_U256; + +/// Scroll EVM implementation. +#[allow(missing_debug_implementations)] +pub struct ScrollEvm<DB: Database, I, P = ScrollPrecompileProvider> { + inner: revm_scroll::ScrollEvm< + ScrollContext<DB>, + I, + ScrollInstructions<EthInterpreter, ScrollContext<DB>>, + P, + >, + inspect: bool, +} + +impl<DB: Database, I, P> ScrollEvm<DB, I, P> { + /// Creates a new instance of [`ScrollEvm`]. + pub const fn new( + inner: revm_scroll::ScrollEvm< + ScrollContext<DB>, + I, + ScrollInstructions<EthInterpreter, ScrollContext<DB>>, + P, + >, + inspect: bool, + ) -> Self { + Self { inner, inspect } + } + + /// Provides a reference to the EVM context. + pub const fn ctx(&self) -> &ScrollContext<DB> { + &self.inner.0.ctx + } + + /// Provides a mutable reference to the EVM context. 
+ pub const fn ctx_mut(&mut self) -> &mut ScrollContext<DB> { + &mut self.inner.0.ctx + } +} + +impl<DB: Database, I, P> Deref for ScrollEvm<DB, I, P> { + type Target = ScrollContext<DB>; + + #[inline] + fn deref(&self) -> &Self::Target { + self.ctx() + } +} + +impl<DB: Database, I, P> DerefMut for ScrollEvm<DB, I, P> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + self.ctx_mut() + } +} + +impl<DB, I, P> Evm for ScrollEvm<DB, I, P> +where + DB: Database, + I: Inspector<ScrollContext<DB>>, + P: PrecompileProvider<ScrollContext<DB>, Output = InterpreterResult>, +{ + type DB = DB; + type Tx = ScrollTransactionIntoTxEnv<TxEnv>; + type Error = EVMError<DB::Error>; + type HaltReason = HaltReason; + type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; + type Precompiles = P; + type Inspector = I; + + fn block(&self) -> &Self::BlockEnv { + &self.block + } + + fn chain_id(&self) -> u64 { + self.cfg.chain_id + } + + fn transact_raw( + &mut self, + tx: Self::Tx, + ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> { + if self.inspect { + self.inner.inspect_tx(tx.into()) + } else { + self.inner.transact(tx.into()) + } + } + + fn transact_system_call( + &mut self, + caller: Address, + contract: Address, + data: Bytes, + ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> { + self.inner.system_call_with_caller(caller, contract, data) + } + + fn db_mut(&mut self) -> &mut Self::DB { + &mut self.journaled_state.database + } + + fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>) + where + Self: Sized, + { + let Context { block: block_env, cfg: cfg_env, journaled_state, .. } = self.inner.0.ctx; + + (journaled_state.database, EvmEnv { block_env, cfg_env }) + } + + fn set_inspector_enabled(&mut self, enabled: bool) { + self.inspect = enabled; + } + + fn precompiles(&self) -> &Self::Precompiles { + &self.inner.0.precompiles + } + + fn precompiles_mut(&mut self) -> &mut Self::Precompiles { + &mut self.inner.0.precompiles + } + + fn inspector(&self) -> &Self::Inspector { + &self.inner.0.inspector + } + + fn inspector_mut(&mut self) -> &mut Self::Inspector { + &mut self.inner.0.inspector + } + + fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) { + ( + &self.inner.0.ctx.journaled_state.database, + &self.inner.0.inspector, + &self.inner.0.precompiles, + ) + } + + fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) { + ( + &mut self.inner.0.ctx.journaled_state.database, + &mut self.inner.0.inspector, + &mut self.inner.0.precompiles, + ) + } +} + +/// Factory producing [`ScrollEvm`]s. 
+#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct ScrollEvmFactory<P = ScrollDefaultPrecompilesFactory> { + _precompiles_factory: core::marker::PhantomData<P>, +} + +impl<P: ScrollPrecompilesFactory> EvmFactory for ScrollEvmFactory<P> { + type Evm<DB: Database, I: Inspector<ScrollContext<DB>>> = ScrollEvm<DB, I, Self::Precompiles>; + type Context<DB: Database> = ScrollContext<DB>; + type Tx = ScrollTransactionIntoTxEnv<TxEnv>; + type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>; + type HaltReason = HaltReason; + type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; + type Precompiles = PrecompilesMap; + + fn create_evm<DB: Database>( + &self, + db: DB, + input: EvmEnv<ScrollSpecId>, + ) -> Self::Evm<DB, NoOpInspector> { + let spec_id = input.cfg_env.spec; + ScrollEvm { + inner: Context::scroll() + .with_db(db) + .with_block(input.block_env) + .with_cfg(input.cfg_env) + .maybe_with_eip_7702() + .maybe_with_eip_7623() + .build_scroll_with_inspector(NoOpInspector {}) + .with_precompiles(P::with_spec(spec_id)), + inspect: false, + } + } + + fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>>>( + &self, + db: DB, + input: EvmEnv<ScrollSpecId>, + inspector: I, + ) -> Self::Evm<DB, I> { + let spec_id = input.cfg_env.spec; + ScrollEvm { + inner: Context::scroll() + .with_db(db) + .with_block(input.block_env) + .with_cfg(input.cfg_env) + .maybe_with_eip_7702() + .maybe_with_eip_7623() + .build_scroll_with_inspector(inspector) + .with_precompiles(P::with_spec(spec_id)), + inspect: true, + } + } +} + +/// A factory trait for creating precompiles for Scroll EVM. +pub trait ScrollPrecompilesFactory: Default + fmt::Debug { + /// Creates a new instance of precompiles for the given Scroll specification ID. + fn with_spec(spec: ScrollSpecId) -> PrecompilesMap; +} + +/// Default implementation of the Scroll precompiles factory. +#[derive(Default, Debug, Copy, Clone)] +pub struct ScrollDefaultPrecompilesFactory; + +impl ScrollPrecompilesFactory for ScrollDefaultPrecompilesFactory { + fn with_spec(spec_id: ScrollSpecId) -> PrecompilesMap { + PrecompilesMap::from_static(ScrollPrecompileProvider::new_with_spec(spec_id).precompiles()) + } +}
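Precompiles are selected at the type level through `ScrollPrecompilesFactory`. A minimal sketch of plugging a custom factory into `ScrollEvmFactory`, assuming the crate is imported as `scroll_alloy_evm`; `CustomPrecompilesFactory` is hypothetical and only uses the calls already made by the default factory above:

    // `CustomPrecompilesFactory` is hypothetical; it delegates to the static Scroll set,
    // which is where additional or overridden precompiles would be installed.
    use alloy_evm::precompiles::PrecompilesMap;
    use revm_scroll::{precompile::ScrollPrecompileProvider, ScrollSpecId};
    use scroll_alloy_evm::{ScrollEvmFactory, ScrollPrecompilesFactory};

    #[derive(Debug, Default, Copy, Clone)]
    struct CustomPrecompilesFactory;

    impl ScrollPrecompilesFactory for CustomPrecompilesFactory {
        fn with_spec(spec: ScrollSpecId) -> PrecompilesMap {
            // Start from the static Scroll set for this spec; a real factory would extend
            // or override entries in the map before returning it.
            PrecompilesMap::from_static(ScrollPrecompileProvider::new_with_spec(spec).precompiles())
        }
    }

    /// The factory is selected purely at the type level.
    type CustomEvmFactory = ScrollEvmFactory<CustomPrecompilesFactory>;

    fn main() {
        let _factory = CustomEvmFactory::default();
    }

Since `ScrollDefaultPrecompilesFactory` performs exactly this delegation, a custom factory only needs to diverge where it actually changes the precompile set.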
diff --git reth/crates/scroll/alloy/evm/src/system_caller.rs scroll-reth/crates/scroll/alloy/evm/src/system_caller.rs new file mode 100644 index 0000000000000000000000000000000000000000..67d17bdbc34c72d52b209a979080865aa0d40872 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/system_caller.rs @@ -0,0 +1,207 @@ +use alloc::string::ToString; + +use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; +use alloy_evm::{ + block::{BlockExecutionError, BlockValidationError}, + Evm, +}; +use alloy_primitives::B256; +use revm::{ + context::{result::ResultAndState, Block}, + DatabaseCommit, +}; +use scroll_alloy_hardforks::ScrollHardforks; + +/// An ephemeral helper type for executing system calls. +/// +/// This can be used to chain system transaction calls. +#[derive(Debug)] +pub(crate) struct ScrollSystemCaller<Spec> { + spec: Spec, +} + +impl<Spec> ScrollSystemCaller<Spec> { + /// Create a new system caller with the given spec. + pub(crate) const fn new(spec: Spec) -> Self { + Self { spec } + } +} + +impl<Spec> ScrollSystemCaller<Spec> +where + Spec: ScrollHardforks, +{ + /// Applies the pre-block call to the EIP-2935 blockhashes contract. + pub(crate) fn apply_blockhashes_contract_call( + &self, + parent_block_hash: B256, + evm: &mut impl Evm<DB: DatabaseCommit>, + ) -> Result<(), BlockExecutionError> { + let result_and_state = + transact_blockhashes_contract_call(&self.spec, parent_block_hash, evm)?; + + if let Some(res) = result_and_state { + evm.db_mut().commit(res.state); + } + + Ok(()) + } +} + +/// Applies the pre-block call to the [EIP-2935] blockhashes contract, using the given block, +/// chain specification, and EVM. +/// +/// If Feynman is not activated, or the block is the genesis block, then this is a no-op, and no +/// state changes are made. +/// +/// Returns `None` if Feynman is not active or the block is the genesis block, otherwise returns the +/// result of the call. +/// +/// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 +#[inline] +fn transact_blockhashes_contract_call<Halt>( + spec: impl ScrollHardforks, + parent_block_hash: B256, + evm: &mut impl Evm<HaltReason = Halt>, +) -> Result<Option<ResultAndState<Halt>>, BlockExecutionError> { + // if Feynman is not active at timestamp then no system transaction occurs. 
+ if !spec.is_feynman_active_at_timestamp(evm.block().timestamp().to()) { + return Ok(None); + } + + // if the block number is zero (genesis block) then no system transaction may occur as per + // EIP-2935 + if evm.block().number().to::<u64>() == 0u64 { + return Ok(None); + } + + let res = match evm.transact_system_call( + alloy_eips::eip4788::SYSTEM_ADDRESS, + HISTORY_STORAGE_ADDRESS, + parent_block_hash.0.into(), + ) { + Ok(res) => res, + Err(e) => { + return Err(BlockValidationError::BlockHashContractCall { message: e.to_string() }.into()) + } + }; + + Ok(Some(res)) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{convert::Infallible, sync::Arc}; + + use crate::gas_price_oracle::L1_GAS_PRICE_ORACLE_ADDRESS; + use alloy_consensus::{Block, BlockBody, Header}; + use alloy_eips::eip2935::HISTORY_STORAGE_CODE; + use alloy_hardforks::ForkCondition; + use alloy_primitives::{keccak256, U256}; + use reth_evm::ConfigureEvm; + use reth_scroll_chainspec::{ScrollChainConfig, ScrollChainSpecBuilder}; + use reth_scroll_evm::ScrollEvmConfig; + use revm::{ + bytecode::Bytecode, + database::{EmptyDBTyped, State}, + state::AccountInfo, + Database, + }; + use scroll_alloy_consensus::ScrollTxEnvelope; + use scroll_alloy_hardforks::{ScrollChainHardforks, ScrollHardfork}; + + #[test] + fn test_should_not_apply_blockhashes_contract_call_before_feynman() { + // initiate system caller. + let system_caller = ScrollSystemCaller::new(ScrollChainHardforks::new([ + (ScrollHardfork::EuclidV2, ForkCondition::Timestamp(0)), + (ScrollHardfork::Feynman, ForkCondition::Timestamp(100)), + ])); + + // initiate db with system contract. + let db = EmptyDBTyped::<Infallible>::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + state.insert_account( + HISTORY_STORAGE_ADDRESS, + AccountInfo { + code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), + code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), + ..Default::default() + }, + ); + + // load l1 oracle in state. + state.insert_account(L1_GAS_PRICE_ORACLE_ADDRESS, Default::default()); + + // prepare chain spec. + let chain_spec = + Arc::new(ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet())); + let evm_config = ScrollEvmConfig::scroll(chain_spec); + + let header = Header { + parent_hash: B256::random(), + number: 1, + gas_limit: 20_000_000, + ..Default::default() + }; + let block: Block<ScrollTxEnvelope, _> = Block { header, body: BlockBody::default() }; + + // initiate the evm and apply the block hashes contract call. + let mut evm = + evm_config.evm_for_block(state, &block.header).expect("failed to get evm for block"); + system_caller.apply_blockhashes_contract_call(block.parent_hash, &mut evm).unwrap(); + + // assert the storage slot remains unchanged. + let parent_hash = evm.db_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(); + assert_eq!(parent_hash, U256::ZERO); + } + + #[test] + fn test_should_apply_blockhashes_contract_call_after_feynman() { + // initiate system caller. + let system_caller = ScrollSystemCaller::new(ScrollChainHardforks::new([( + ScrollHardfork::Feynman, + ForkCondition::Timestamp(0), + )])); + + // initiate db with system contract. 
+ let db = EmptyDBTyped::<Infallible>::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + state.insert_account( + HISTORY_STORAGE_ADDRESS, + AccountInfo { + code_hash: keccak256(HISTORY_STORAGE_CODE.clone()), + code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), + ..Default::default() + }, + ); + + // load l1 oracle in state. + state.insert_account(L1_GAS_PRICE_ORACLE_ADDRESS, Default::default()); + + // prepare chain spec. + let chain_spec = + Arc::new(ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet())); + let evm_config = ScrollEvmConfig::scroll(chain_spec); + + let header = Header { + parent_hash: B256::random(), + number: 1, + gas_limit: 20_000_000, + ..Default::default() + }; + let block: Block<ScrollTxEnvelope, _> = Block { header, body: BlockBody::default() }; + + // initiate the evm and apply the block hashes contract call. + let mut evm = + evm_config.evm_for_block(state, &block.header).expect("failed to get evm for block"); + system_caller.apply_blockhashes_contract_call(block.parent_hash, &mut evm).unwrap(); + + // assert the hash is written to storage. + let parent_hash = evm.db_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(); + assert_eq!(Into::<B256>::into(parent_hash), block.parent_hash); + } +}
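The tests above read storage slot 0 because they execute block number 1. Under the standard EIP-2935 layout (an assumption here, not something this file encodes), the contract keeps a ring buffer of 8191 slots and the system call stores the parent hash of block `n` at slot `(n - 1) % 8191`. A tiny sketch of that slot computation:

    // Sketch only: HISTORY_SERVE_WINDOW and the slot formula come from EIP-2935 itself,
    // not from this file.
    const HISTORY_SERVE_WINDOW: u64 = 8191;

    /// Storage slot holding the parent hash after the system call for block `number`.
    fn parent_hash_slot(number: u64) -> u64 {
        assert!(number > 0, "the genesis block has no parent and triggers no system call");
        (number - 1) % HISTORY_SERVE_WINDOW
    }

    fn main() {
        assert_eq!(parent_hash_slot(1), 0); // matches the assertions in the tests above
        assert_eq!(parent_hash_slot(8192), 0); // the ring buffer wraps around
    }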
diff --git reth/crates/scroll/alloy/evm/src/tx/compression.rs scroll-reth/crates/scroll/alloy/evm/src/tx/compression.rs new file mode 100644 index 0000000000000000000000000000000000000000..5bedb26e7836a08e6bb0c54ee37eacbca0f16d78 --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/tx/compression.rs @@ -0,0 +1,342 @@ +use super::FromRecoveredTx; +use crate::ScrollTransactionIntoTxEnv; +use alloy_consensus::transaction::Recovered; +use alloy_eips::{Encodable2718, Typed2718}; +use alloy_evm::{RecoveredTx, ToTxEnv}; +use alloy_primitives::{Address, Bytes, TxKind, U256}; +use revm::context::TxEnv; +use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message}; +pub use zstd_compression::{compute_compressed_size, compute_compression_ratio}; + +#[cfg(feature = "zstd_compression")] +mod zstd_compression { + use super::*; + use std::io::Write; + + use encoder_standard::{init_zstd_encoder, N_BLOCK_SIZE_TARGET}; + use revm_scroll::l1block::TX_L1_FEE_PRECISION_U256; + + /// Computes the compressed size for the provided bytes. + /// + /// This is computed as: + /// `min(encoded_size, original_size)` + pub fn compute_compressed_size<T: AsRef<[u8]>>(bytes: &T) -> usize { + // Compressed size of empty data is 0. + if bytes.as_ref().is_empty() { + return 0; + } + + // Instantiate the compressor + let mut compressor = init_zstd_encoder(N_BLOCK_SIZE_TARGET); + + // Set the pledged source size to the length of the bytes + // and write the bytes to the compressor. + let original_bytes_len = bytes.as_ref().len(); + compressor + .set_pledged_src_size(Some(original_bytes_len as u64)) + .expect("failed to set pledged source size"); + compressor.write_all(bytes.as_ref()).expect("failed to write bytes to compressor"); + + // Finish the compression and get the result. + let result = compressor.finish().expect("failed to finish compression"); + let encoded_bytes_len = result.len(); + + if encoded_bytes_len > original_bytes_len { + original_bytes_len + } else { + encoded_bytes_len + } + } + + /// Computes the compression ratio for the provided bytes. + /// + /// This is computed as: + /// `max(1, original_size * TX_L1_FEE_PRECISION_U256 / encoded_size)` + pub fn compute_compression_ratio<T: AsRef<[u8]>>(bytes: &T) -> U256 { + // By definition, the compression ratio of empty data is infinity + if bytes.as_ref().is_empty() { + return U256::MAX + } + + let original_bytes_len = bytes.as_ref().len(); + let encoded_bytes_len = compute_compressed_size(bytes); + + if encoded_bytes_len == original_bytes_len { + return TX_L1_FEE_PRECISION_U256; + } + + // compression_ratio(tx) = size(tx) * PRECISION / size(zstd(tx)) + U256::from(original_bytes_len) + .saturating_mul(TX_L1_FEE_PRECISION_U256) + .wrapping_div(U256::from(encoded_bytes_len)) + } +} + +#[cfg(not(feature = "zstd_compression"))] +mod zstd_compression { + use super::*; + + /// Computes the compressed size for the provided bytes. This panics if the compression + /// feature is not enabled. This is to support `no_std` environments where zstd is not + /// available. + pub fn compute_compressed_size<T: AsRef<[u8]>>(bytes: &T) -> usize { + panic!("Compression feature is not enabled. Please enable the 'compression' feature to use this function."); + } + + /// Computes the compression ratio for the provided bytes. This panics if the compression + /// feature is not enabled. This is to support `no_std` environments where zstd is not + /// available. + pub fn compute_compression_ratio<T: AsRef<[u8]>>(_bytes: &T) -> U256 { + panic!("Compression feature is not enabled. 
Please enable the 'compression' feature to use this function."); + } +} + +/// A generic wrapper for a type that includes compression info and encoded bytes. +#[derive(Debug, Clone)] +pub struct WithCompressionInfo<T> { + // The original value. + value: T, + // The compression ratio: + // compression_ratio = max(1, size(tx.data) * 1e9 / size(compress(tx.data))) + compression_ratio: U256, + // The compressed size in bytes: + // compressed_size = min(size(zstd(rlp(tx))), size(rlp(tx))) + compressed_size: usize, + // The raw encoded bytes of `value`, without compression. + encoded_bytes: Bytes, +} + +/// A trait for types that can be constructed from a transaction, +/// its sender, encoded bytes, compression ratio, and compressed size. +pub trait FromTxWithCompressionInfo<Tx> { + /// Builds a `TxEnv` from a transaction, its sender, encoded transaction bytes, + /// its compression ratio, and compressed size. + fn from_tx_with_compression_info( + tx: &Tx, + sender: Address, + encoded: Bytes, + compression_ratio: Option<U256>, + compressed_size: Option<usize>, + ) -> Self; +} + +impl<TxEnv, T> FromTxWithCompressionInfo<&T> for TxEnv +where + TxEnv: FromTxWithCompressionInfo<T>, +{ + fn from_tx_with_compression_info( + tx: &&T, + sender: Address, + encoded: Bytes, + compression_ratio: Option<U256>, + compressed_size: Option<usize>, + ) -> Self { + TxEnv::from_tx_with_compression_info( + tx, + sender, + encoded, + compression_ratio, + compressed_size, + ) + } +} + +impl<T, TxEnv: FromTxWithCompressionInfo<T>> ToTxEnv<TxEnv> for WithCompressionInfo<Recovered<T>> { + fn to_tx_env(&self) -> TxEnv { + let recovered = &self.value; + TxEnv::from_tx_with_compression_info( + recovered.inner(), + recovered.signer(), + self.encoded_bytes.clone(), + Some(self.compression_ratio), + Some(self.compressed_size), + ) + } +} + +impl<T, TxEnv: FromTxWithCompressionInfo<T>> ToTxEnv<TxEnv> for WithCompressionInfo<&Recovered<T>> { + fn to_tx_env(&self) -> TxEnv { + let recovered = &self.value; + TxEnv::from_tx_with_compression_info( + recovered.inner(), + *recovered.signer(), + self.encoded_bytes.clone(), + Some(self.compression_ratio), + Some(self.compressed_size), + ) + } +} + +impl FromTxWithCompressionInfo<ScrollTxEnvelope> for ScrollTransactionIntoTxEnv<TxEnv> { + fn from_tx_with_compression_info( + tx: &ScrollTxEnvelope, + caller: Address, + encoded: Bytes, + compression_ratio: Option<U256>, + compressed_size: Option<usize>, + ) -> Self { + let base = match &tx { + ScrollTxEnvelope::Legacy(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip2930(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip1559(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip7702(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::L1Message(tx) => { + let TxL1Message { to, value, gas_limit, input, queue_index: _, sender: _ } = &**tx; + TxEnv { + tx_type: tx.ty(), + caller, + gas_limit: *gas_limit, + kind: TxKind::Call(*to), + value: *value, + data: input.clone(), + gas_price: 0, + gas_priority_fee: None, + chain_id: None, + nonce: 0, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + } + } + }; + + Self::new(base, Some(encoded), compression_ratio, compressed_size) + } +} + +/// A trait that allows a type to be converted into [`WithCompressionInfo`]. 
+pub trait ToTxWithCompressionInfo<Tx> { + /// Converts the type into a [`WithCompressionInfo`] instance using the provided compression + /// ratio and compressed size. + fn with_compression_info( + &self, + compression_ratio: U256, + compressed_size: usize, + ) -> WithCompressionInfo<Recovered<&Tx>>; +} + +impl<Tx: Encodable2718> ToTxWithCompressionInfo<Tx> for Recovered<&Tx> { + fn with_compression_info( + &self, + compression_ratio: U256, + compressed_size: usize, + ) -> WithCompressionInfo<Recovered<&Tx>> { + let encoded_bytes = self.inner().encoded_2718(); + WithCompressionInfo { + value: *self, + compression_ratio, + compressed_size, + encoded_bytes: encoded_bytes.into(), + } + } +} + +impl<Tx, T: RecoveredTx<Tx>> RecoveredTx<Tx> for WithCompressionInfo<T> { + fn tx(&self) -> &Tx { + self.value.tx() + } + + fn signer(&self) -> &Address { + self.value.signer() + } +} + +#[cfg(test)] +mod tests { + use crate::compute_compressed_size; + + use super::compute_compression_ratio; + use alloy_primitives::{bytes, uint, U256}; + + #[test] + fn test_compression() -> eyre::Result<()> { + // Compute compression ratio and compressed size for each test case. + // These test cases are meant to be shared between the Go and Rust implementations. + // Note: Feynman's compression ratio is computed on the transaction payload, + // while Galileo's compressed size is computed on the full RLP-encoded transaction. + // In these compression tests we ignore this distinction. + + // eth-transfer + let bytes = bytes!("0x"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::MAX); // empty data is infinitely compressible by definition + assert_eq!(size, 0); + + // scr-transfer + // https://scrollscan.com/tx/0x7b681ce914c9774aff364d2b099b2ba41dea44bcd59dbebb9d4c4b6853893179 + let bytes = bytes!("0xa9059cbb000000000000000000000000687b50a70d33d71f9a82dd330b8c091e4d77250800000000000000000000000000000000000000000000000ac96dda943e512bb9"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(1_387_755_102u64)); // 1.4x + assert_eq!(size, 49); + + // syncswap-swap + // https://scrollscan.com/tx/0x59a7b72503400b6719f3cb670c7b1e7e45ce5076f30b98bdaad3b07a5d0fbc02 + let bytes = 
bytes!("0x2cc4081e00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000005ec79b80000000000000000000000000000000000000000000000000003328b944c400000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000053000000000000000000000000000000000000040000000000000000000000000000000000000000000000000091a94863ca800000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000814a23b053fd0f102aeeda0459215c2444799c7000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000600000000000000000000000005300000000000000000000000000000000000004000000000000000000000000485ca81b70255da2fe3fd0814b57d1b08fce784e00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(4_857_142_857u64)); // 4.8x + assert_eq!(size, 126); + + // uniswap-swap + // https://scrollscan.com/tx/0x65b268bd8ef416f44983ee277d748de044243272b0f106b71ff03cc8501a05da + let bytes = bytes!("0x5023b4df00000000000000000000000006efdbff2a14a7c8e15944d1f4a48f9f95f663a4000000000000000000000000530000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000001f4000000000000000000000000485ca81b70255da2fe3fd0814b57d1b08fce784e000000000000000000000000000000000000000000000000006a94d74f43000000000000000000000000000000000000000000000000000000000000045af6750000000000000000000000000000000000000000000000000000000000000000"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(2_620_689_655u64)); // 2.6x + assert_eq!(size, 87); + + // etherfi-deposit + // https://scrollscan.com/tx/0x41a77736afd54134b6c673e967c9801e326495074012b4033bd557920cbe5a71 + let bytes = bytes!("0x63baa26000000000000000000000000077a7e3215a621a9935d32a046212ebfcffa3bff900000000000000000000000006efdbff2a14a7c8e15944d1f4a48f9f95f663a400000000000000000000000008c6f91e2b681faf5e17227f2a44c307b3c1364c0000000000000000000000000000000000000000000000000000000002d4cae000000000000000000000000000000000000000000000000000000000028f7f83000000000000000000000000249e3fa81d73244f956ecd529715323b6d02f24b00000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041a95314c3a11f86cc673f2afd60d27f559cb2edcc0da5af030adffc97f9a5edc3314efbadd32878e289017f644a4afa365da5367fefe583f7c4ff0c6047e2c1ff1b00000000000000000000000000000000000000000000000000000000000000"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(1_788_944_723u64)); // 1.8x + assert_eq!(size, 199); + + // etherfi-stargate + // https://scrollscan.com/tx/0x08bf18e860d4770920ba838fe709ca202227aa9afea1b0c11314e7f41fc5f578 + let bytes = + bytes!("0x5988e7a1000000000000000000000000388325dd7c76e37cfda1ed6d8a97849a46b5512a"); + let ratio = compute_compression_ratio(&bytes); + let size = 
compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(1_000_000_000)); // 1x + assert_eq!(size, 36); + + // etherfi-openocean + // https://scrollscan.com/tx/0xba29777d4135cb1b1dc462f07d486ad589dd3de74b8a5a64e9ff070cef0db35a + let bytes = bytes!("0x052fc63500000000000000000000000088c6e32066a5ae1dba00b2ea064d30c22a21f847000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b000000000000000000000000f55bec9cafdbe8730f096aa55dad6d22d44099df0000000000000000000000000000000000000000000000051e17bd1af1999e070000000000000000000000000000000000000000000000000000000000ccefaf0000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000204000000000000000000000000000000000000000000000000000000000000020800000000000000000000000000000000000000000000000000000000000001f0490411a320000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a446000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b000000000000000000000000f55bec9cafdbe8730f096aa55dad6d22d44099df0000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a44600000000000000000000000088c6e32066a5ae1dba00b2ea064d30c22a21f8470000000000000000000000000000000000000000000000051e17bd1af1999e070000000000000000000000000000000000000000000000000000000000ccefaf0000000000000000000000000000000000000000000000000000000000cf019f00000000000000000000000000000000000000000000000000000000000000020000000000000000000000002e0be8d3d9f1833fbacf9a5e9f2d470817ff0c0000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000005c0000000000000000000000000000000000000000000000000000000000000082000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000ba00000000000000000000000000000000000000000000000000000000000000d8000000000000000000000000000000000000000000000000000000000000011c000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000019a00000000000000000000000000000000000000000000000000000000000001ac000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064eb5625d9000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b0000000000000000000000007160570bb153edd0ea1775ec2b2ac9b65f1ab61b000000000000000000000000000000000000000000000004e9b0639efc6a8339000000000000000000000000000000000000000000000000000000000000000000000000000000007160570bb153edd0ea1775ec2b2ac9b65f1ab61b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064511de15b000000000
000000000000000d29687c813d741e2f938f4ac377128810e217b1b000000000000000000000000054641825533d1bc3324df3c30cbc3baea812087000000000000000000000000000000000000000000000004e9b0639efc6a833900000000000000000000000000000000000000000000000000000000000000000000000000000000054641825533d1bc3324df3c30cbc3baea81208700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001247132bb7f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a446000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b0000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a4460000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001a49f865422000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b00000000000000000000000000000001000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d1660f99000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b000000000000000000000000940f31ea73bfea357354b0263b92f3ba70eb3b6100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000643afe5f008000000000000000186a00b4940f31ea73bfea357354b0263b92f3ba70eb3b61000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b0000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a44600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001a49f865422000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b0000000000000000000000000000000100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000
0000000000000000004400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d1660f99000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b00000000000000000000000095b0c398cf6d296faa551291a9f3bf02a68c7a300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000095b0c398cf6d296faa551291a9f3bf02a68c7a3000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001247132bb7f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a446000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d29687c813d741e2f938f4ac377128810e217b1b0000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a44600000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000038451a74316000000000000000000000000530000000000000000000000000000000000000400000000000000000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064eb5625d900000000000000000000000053000000000000000000000000000000000000040000000000000000000000007160570bb153edd0ea1775ec2b2ac9b65f1ab61b0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000007160570bb153edd0ea1775ec2b2ac9b65f1ab61b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064511de15b0000000000000000000000005300000000000000000000000000000000000004000000000000000000000000814a23b053fd0f102aeeda0459215c2444799c70000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000044000000000000000000000000000000000000000000000000000000000000004400000000000000000000000000000000000000000000000000000000000000000000000000000000814a23b053fd0f102aeeda0459215c2444799c7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001247132bb7f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a44600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000006000000000000000000000000053000000000000000000000000000000000000040000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a4460000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002449f86542200000000000000000000000006efdbff2a14a7c8e15944d1f4a48f9f95f663a4000000000000000000000000000000140000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000024000000000000000000000000882f1fdd5e320e39b6baa8317ec6f0171d1f499800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001043eece7db0000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a446000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffffff00000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000a7e848aca42d879ef06507fca0e7b33a0a63c1e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002449f86542200000000000000000000000006efdbff2a14a7c8e15944d1f4a48f9f95f663a400000000000000000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000800000000000000000000000000000000000000000000000000000000000000104e5b07cdb000000000000000000000000f1783f3377b3a70465c193ef33942c0803121ba0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000001aa298ae7c53d8dafa200ed49608649bfa76a44600000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002e06efdbff2a14a7c8e15944d1f4a48f9f95f663a4000064f55bec9cafdbe8730f096aa55dad6d22d44099df0000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000648a6a1e85000000000000000000000000f55bec9cafdbe8730f096aa55dad6d22d44099df000000000000000000000000922164bbbd36acf9e854acbbf32facc949fcaeef0000000000000000000000000000000000000000000000000000000000cf019f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001a49f865422000000000000000000000000f55bec9cafdbe8730f096aa55dad6d22d44099df00000000000000000000000000000001000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000004400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d1660f99000000000000000000000000f55bec9cafdbe8730f096aa55dad6d22d44099df00000000000000000000000088c6e32066a5ae1dba00b2ea064d30c22a21f84700000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000ba5f8a00c2032c01bd7ed4fb2f3c8e95982539bd000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000041eacb429b4309061c97d928fa5d671b6b881dc7f959fe98efcd40e6a749866ab7000ba6acb9da5d2268f4d2acd12164f9036995428dc9cdad753fe205e0ab00da1b00000000000000000000000000000000000000000000000000000000000000"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, uint!(8_992_608_236_U256)); // 9x + assert_eq!(size, 947); + + // edgepushoracle-postupdate + // https://scrollscan.com/tx/0x8271c68146a3b07b1ebf52ce0b550751f49cbd72fa0596ef14ff56d1f23a0bec + let bytes = 
bytes!("0x49a1a4fb000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000005f725f60000000000000000000000000000000000000000000000000000000003d0cac600000000000000000000000000000000000000000000000000000000685d50cd000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000022000000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000004155903b95865fc5a5dd7d4d876456140dd0b815695647fc41eb1924f4cfe267265130b5a5d77125c44cf6a5a81edba6d5850ba00f90ab83281c9b44e17528fd74010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000416f000e0498733998e6a1a6454e116c1b1f95f7e000400b6a54029406cf288bdc615b62de8e2db533d6010ca57001e0b8a4b3f05ed516a31830516c52b9df206e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410dabc77a807d729ff62c3be740d492d884f026ad2770fa7c4bdec569e201643656b07f2009d2129173738571417734a3df051cebc7b8233bec6d9471c21c098700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041eb009614c939170e9ff3d3e06c3a2c45810fe46a364ce28ecec5e220f5fd86cd6e0f70ab9093dd6b22b69980246496b600c8fcb054047962d4128efa48b692f301000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041a31b4dd4f0a482372d75c7a8c5f11aa8084a5f358579866f1d25a26a15beb2b5153400bfa7fa3d6fba138c02dd1eb8a5a97d62178d98c5632a153396a566e5ed0000000000000000000000000000000000000000000000000000000000000000"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(2_441_805_225u64)); // 2.4x + assert_eq!(size, 421); + + // intmax-post + // https://scrollscan.com/tx/0x7244e27223cdd79ba0f0e3990c746e5d524e35dbcc200f0a7e664ffdc6d08eef + let bytes = bytes!("0x9b6babf0f0372bb253e060ecbdd3dbef8b832b0e743148bd807bfcf665593a56a18bac69000000000000000000000000000000000000000000000000000000006861676d0000000000000000000000000000000000000000000000000000000000000015800000000000000000000000000000000000000000000000000000000000000029a690c4ef1e18884a11f73c8595fb721f964a3e2bee809800c474278f024bcd05a76119827e6c464cee8620f616a9a23d41305eb9f9682f9d2eaf964325fcd71147783453566f27ce103a2398d96719ee22ba51b89b92cdf952af817929329403b75ae310b23cf250041d53c82bef431fa2527e2dd68b49f45f06feb2bd09f011358fe2650b8987ea2bb39bb6e28ce770f4fc9c4f064d0ae7573a1450452b501a5b0d3454d254dbf9db7094f4ca1f5056143f5c70dee4126443a6150d9e51bd05dac7e9a2bd48a8797ac6e9379d400c5ce1815b10846eaf0d80dca3a727ffd0075387e0f1bc1b363c81ecf8d05a4b654ac6fbe1cdc7c741a5c0bbeabde4138906009129ca033af12094fd7306562d9735b2fe757f021b7eb3320f8a814a286a10130969de2783e49871b80e967cfba630e6bdef2fd1d2b1076c6c3f5fd9ae5800000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000000012a98f1556efe81340fad3e59044b8139ce62f1d5d50b44b680de9422b1ddbf1a"); + let ratio = compute_compression_ratio(&bytes); + let size = 
compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(1_298_578_199u64)); // 1.3x + assert_eq!(size, 422); + + // galileo-mismatch + // https://scrollscan.com/tx/0x773daae3ac1d31cf0ac76dc25504fe76ad5458c7df9578161898b73bd8039c90 + let bytes = bytes!("0xf901ae8265f68301d52c8307040d94c9c35e593842c3d5e71304b2291e204583226e2a80b90144412658e5000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000020027004f00a500d100d500d900dd00ed00fd530000000000000000000000000000000000000406efdbff2a14a7c8e15944d1f4a48f9f95f663a4005300903c813df550a32d4a9d42010d057386429ad2328ed906efdbff2a14a7c8e15944d1f4a48f9f95f663a4530000000000000000000000000000000000000414ffa10b2019d6eb63140ef4b5026c7e049936bfeb00a900bdfeec4e40c170ef3736dc9a29389fb8cd7ed12409ae7bb52a3614ac5b66389d8873a8cb9f45f817c500a900bd000500010000000000000000000000000000187abb94380e0000000000000000000000092f2a085c00000083104ec3a0e3c466f58aa6edab61bd5f2bab117804d6f3cf125a815e53c161c02df8209d09a0267df663ab1cb9a5c85441df5051a7a03623b946a87ff42af694fc21e40f34fb"); + let ratio = compute_compression_ratio(&bytes); + let size = compute_compressed_size(&bytes); + assert_eq!(ratio, U256::from(1_438_538_205u64)); // 1.4x + assert_eq!(size, 301); + + Ok(()) + } +}
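The expected values above suggest that `compute_compression_ratio` returns a fixed-point `U256` scaled by 1e9 (e.g. `8_992_608_236` is annotated as "9x"), while `compute_compressed_size` returns the compressed byte count. A minimal sketch of calling the helpers directly, assuming they are re-exported from the `scroll-alloy-evm` crate root as in `tx/mod.rs` below:

```rust
// Sketch only: the `scroll_alloy_evm` re-export path and the 1e9 fixed-point
// interpretation of the ratio are assumptions inferred from the tests above.
use alloy_primitives::Bytes;
use scroll_alloy_evm::{compute_compressed_size, compute_compression_ratio};

fn main() {
    // A long run of zeroes stands in for highly compressible calldata.
    let calldata = Bytes::from(vec![0u8; 1024]);

    let ratio = compute_compression_ratio(&calldata);
    let size = compute_compressed_size(&calldata);

    // Interpret the fixed-point ratio for display (assumed 1e9 scaling).
    println!("compressed to {size} bytes, ratio ~{:.1}x", ratio.to::<u64>() as f64 / 1e9);
}
```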
diff --git reth/crates/scroll/alloy/evm/src/tx/mod.rs scroll-reth/crates/scroll/alloy/evm/src/tx/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e3698a30d1554f4199d40d4a83ab7ec148b618a --- /dev/null +++ scroll-reth/crates/scroll/alloy/evm/src/tx/mod.rs @@ -0,0 +1,301 @@ +use alloy_consensus::crypto::secp256k1::recover_signer; +use alloy_eips::{Encodable2718, Typed2718}; +use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv}; +use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; +use core::ops::{Deref, DerefMut}; +use revm::context::{ + either::Either, + transaction::{RecoveredAuthority, RecoveredAuthorization}, + Transaction, TxEnv, +}; +use revm_scroll::ScrollTransaction; +use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message, L1_MESSAGE_TRANSACTION_TYPE}; + +mod compression; +pub use compression::{ + compute_compressed_size, compute_compression_ratio, FromTxWithCompressionInfo, + ToTxWithCompressionInfo, WithCompressionInfo, +}; + +/// This structure wraps around a [`ScrollTransaction`] and allows us to implement the [`IntoTxEnv`] +/// trait. This can be removed when the interface is improved. Without this wrapper, we would need +/// to implement the trait in `revm-scroll`, which adds a dependency on `alloy-evm` in the crate. +/// Any changes to `alloy-evm` would require changes to `revm-scroll` which isn't desired. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct ScrollTransactionIntoTxEnv<T: Transaction>(ScrollTransaction<T>); + +impl<T: Transaction> ScrollTransactionIntoTxEnv<T> { + /// Returns a new [`ScrollTransactionIntoTxEnv`]. + pub fn new( + base: T, + rlp_bytes: Option<Bytes>, + compression_ratio: Option<U256>, + compressed_size: Option<usize>, + ) -> Self { + Self(ScrollTransaction::new(base, rlp_bytes, compression_ratio, compressed_size)) + } +} + +impl<T: Transaction> From<ScrollTransaction<T>> for ScrollTransactionIntoTxEnv<T> { + fn from(value: ScrollTransaction<T>) -> Self { + Self(value) + } +} + +impl<T: Transaction> From<ScrollTransactionIntoTxEnv<T>> for ScrollTransaction<T> { + fn from(value: ScrollTransactionIntoTxEnv<T>) -> Self { + value.0 + } +} + +impl<T: Transaction> Deref for ScrollTransactionIntoTxEnv<T> { + type Target = ScrollTransaction<T>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<T: Transaction> DerefMut for ScrollTransactionIntoTxEnv<T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<T: Transaction> IntoTxEnv<Self> for ScrollTransactionIntoTxEnv<T> { + fn into_tx_env(self) -> Self { + self + } +} + +impl<T: Transaction> Transaction for ScrollTransactionIntoTxEnv<T> { + type AccessListItem<'a> + = T::AccessListItem<'a> + where + T: 'a; + type Authorization<'a> + = T::Authorization<'a> + where + T: 'a; + + fn tx_type(&self) -> u8 { + self.0.tx_type() + } + + fn caller(&self) -> Address { + self.0.caller() + } + + fn gas_limit(&self) -> u64 { + self.0.gas_limit() + } + + fn value(&self) -> U256 { + self.0.value() + } + + fn input(&self) -> &Bytes { + self.0.input() + } + + fn nonce(&self) -> u64 { + self.0.nonce() + } + + fn kind(&self) -> TxKind { + self.0.kind() + } + + fn chain_id(&self) -> Option<u64> { + self.0.chain_id() + } + + fn gas_price(&self) -> u128 { + self.0.gas_price() + } + + fn access_list( + &self, + ) -> Option<impl Iterator<Item = <ScrollTransaction<T> as Transaction>::AccessListItem<'_>>> + { + self.0.access_list() + } + + fn 
blob_versioned_hashes(&self) -> &[B256] { + self.0.blob_versioned_hashes() + } + + fn max_fee_per_blob_gas(&self) -> u128 { + self.0.max_fee_per_blob_gas() + } + + fn authorization_list_len(&self) -> usize { + self.0.authorization_list_len() + } + + fn authorization_list(&self) -> impl Iterator<Item = Self::Authorization<'_>> { + self.0.authorization_list() + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + self.0.max_priority_fee_per_gas() + } +} + +impl FromTxWithEncoded<ScrollTxEnvelope> for ScrollTransactionIntoTxEnv<TxEnv> { + fn from_encoded_tx(tx: &ScrollTxEnvelope, caller: Address, encoded: Bytes) -> Self { + let base = match &tx { + ScrollTxEnvelope::Legacy(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip2930(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip1559(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::Eip7702(tx) => TxEnv::from_recovered_tx(tx.tx(), caller), + ScrollTxEnvelope::L1Message(tx) => { + let TxL1Message { to, value, gas_limit, input, queue_index: _, sender: _ } = &**tx; + TxEnv { + tx_type: tx.ty(), + caller, + gas_limit: *gas_limit, + kind: TxKind::Call(*to), + value: *value, + data: input.clone(), + gas_price: 0, + gas_priority_fee: None, + chain_id: None, + nonce: 0, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + } + } + }; + + let (encoded, compression_ratio, compressed_size) = match tx { + ScrollTxEnvelope::L1Message(_) => (None, None, None), + _ => { + let ratio = compute_compression_ratio(base.input()); // compute on tx data + let size = compute_compressed_size(&encoded); // compute on rlp-encoded tx + (Some(encoded), Some(ratio), Some(size)) + } + }; + + Self::new(base, encoded, compression_ratio, compressed_size) + } +} + +impl FromRecoveredTx<ScrollTxEnvelope> for ScrollTransactionIntoTxEnv<TxEnv> { + fn from_recovered_tx(tx: &ScrollTxEnvelope, sender: Address) -> Self { + let envelope = tx.encoded_2718(); + + let base = match &tx { + ScrollTxEnvelope::Legacy(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().gas_price, + gas_priority_fee: None, + kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: tx.tx().chain_id, + nonce: tx.tx().nonce, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 0, + caller: sender, + }, + ScrollTxEnvelope::Eip2930(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().gas_price, + gas_priority_fee: None, + kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 1, + caller: sender, + }, + ScrollTxEnvelope::Eip1559(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().max_fee_per_gas, + gas_priority_fee: Some(tx.tx().max_priority_fee_per_gas), + kind: tx.tx().to, + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: 2, + caller: sender, + }, + 
ScrollTxEnvelope::Eip7702(tx) => TxEnv { + gas_limit: tx.tx().gas_limit, + gas_price: tx.tx().max_fee_per_gas, + gas_priority_fee: Some(tx.tx().max_priority_fee_per_gas), + kind: tx.tx().to.into(), + value: tx.tx().value, + data: tx.tx().input.clone(), + chain_id: Some(tx.tx().chain_id), + nonce: tx.tx().nonce, + access_list: tx.tx().access_list.clone(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: tx + .tx() + .authorization_list + .iter() + .map(|auth| { + Either::Right(RecoveredAuthorization::new_unchecked( + auth.inner().clone(), + auth.signature() + .ok() + .and_then(|signature| { + recover_signer(&signature, auth.signature_hash()).ok() + }) + .map_or(RecoveredAuthority::Invalid, RecoveredAuthority::Valid), + )) + }) + .collect(), + tx_type: 4, + caller: sender, + }, + ScrollTxEnvelope::L1Message(tx) => TxEnv { + gas_limit: tx.gas_limit, + gas_price: 0, + gas_priority_fee: None, + kind: TxKind::Call(tx.to), + value: tx.value, + data: tx.input.clone(), + chain_id: None, + nonce: 0, + access_list: Default::default(), + blob_hashes: Default::default(), + max_fee_per_blob_gas: Default::default(), + authorization_list: Default::default(), + tx_type: L1_MESSAGE_TRANSACTION_TYPE, + caller: sender, + }, + }; + + let (encoded, compression_ratio, compressed_size) = match tx { + ScrollTxEnvelope::L1Message(_) => (None, None, None), + _ => { + let encoded = envelope.into(); + let ratio = compute_compression_ratio(base.input()); // compute on tx data + let size = compute_compressed_size(&encoded); // compute on rlp-encoded tx + (Some(encoded), Some(ratio), Some(size)) + } + }; + + Self::new(base, encoded, compression_ratio, compressed_size) + } +}
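As a rough usage sketch (re-export paths from `scroll-alloy-evm` are assumed), the wrapper can also be assembled by hand, mirroring how `from_encoded_tx` splits the compression inputs: the ratio is computed over the transaction data, the compressed size over the RLP-encoded envelope.

```rust
// Sketch under assumed crate-root re-exports; real callers normally go through
// `FromRecoveredTx` / `FromTxWithEncoded` as implemented above.
use alloy_primitives::bytes;
use revm::context::{Transaction, TxEnv};
use scroll_alloy_evm::{
    compute_compressed_size, compute_compression_ratio, ScrollTransactionIntoTxEnv,
};

fn main() {
    // A default EVM transaction environment stands in for a decoded Scroll tx.
    let base = TxEnv::default();

    // Placeholder for the RLP-encoded envelope normally produced by `Encodable2718`.
    let encoded = bytes!("0xdeadbeef");

    // Same split as `from_encoded_tx`: ratio over tx data, size over the encoded tx.
    let ratio = compute_compression_ratio(base.input());
    let size = compute_compressed_size(&encoded);

    let tx_env = ScrollTransactionIntoTxEnv::new(base, Some(encoded), Some(ratio), Some(size));

    // The wrapper forwards the `Transaction` impl to the wrapped `ScrollTransaction`.
    println!("gas limit: {}", tx_env.gas_limit());
}
```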
diff --git reth/crates/scroll/alloy/hardforks/Cargo.toml scroll-reth/crates/scroll/alloy/hardforks/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7dea06ea9bbaa8c5bc6fbc2477fb2c49420db09a --- /dev/null +++ scroll-reth/crates/scroll/alloy/hardforks/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "scroll-alloy-hardforks" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-hardforks.workspace = true + +# misc +auto_impl = { workspace = true, default-features = false } +serde = { workspace = true, optional = true } + +[features] +default = ["std"] +std = ["serde?/std"] +serde = ["dep:serde", "alloy-hardforks/serde"]
diff --git reth/crates/scroll/alloy/hardforks/src/hardfork.rs scroll-reth/crates/scroll/alloy/hardforks/src/hardfork.rs new file mode 100644 index 0000000000000000000000000000000000000000..29ecd64dc60f3b893546507773961397e3405b2f --- /dev/null +++ scroll-reth/crates/scroll/alloy/hardforks/src/hardfork.rs @@ -0,0 +1,106 @@ +//! Hard forks of scroll protocol. + +use alloy_hardforks::{hardfork, ForkCondition}; + +hardfork!( + /// The name of the Scroll hardfork + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + ScrollHardfork { + /// Archimedes: scroll test hardfork. + Archimedes, + /// Bernoulli: <https://scroll.io/blog/blobs-are-here-scrolls-bernoulli-upgrade>. + Bernoulli, + /// Curie: <https://scroll.io/blog/compressing-the-gas-scrolls-curie-upgrade>. + Curie, + /// Darwin: <https://scroll.io/blog/proof-recursion-scrolls-darwin-upgrade>. + Darwin, + /// DarwinV2 <https://x.com/Scroll_ZKP/status/1830565514755584269>. + DarwinV2, + /// Euclid <https://docs.scroll.io/en/technology/overview/scroll-upgrades/euclid-upgrade/> + Euclid, + /// EuclidV2 <https://docs.scroll.io/en/technology/overview/scroll-upgrades/euclid-upgrade/> + EuclidV2, + /// Feynman <https://docs.scroll.io/en/technology/overview/scroll-upgrades/feynman-upgrade/> + Feynman, + /// Galileo <https://docs.scroll.io/en/technology/overview/scroll-upgrades/galileo-upgrade/> + Galileo, + /// GalileoV2 <https://docs.scroll.io/en/technology/overview/scroll-upgrades/galileo-upgrade/> + GalileoV2, + } +); + +impl ScrollHardfork { + /// Scroll mainnet list of hardforks. + pub const fn scroll_mainnet() -> [(Self, ForkCondition); 10] { + [ + (Self::Archimedes, ForkCondition::Block(0)), + (Self::Bernoulli, ForkCondition::Block(5220340)), + (Self::Curie, ForkCondition::Block(7096836)), + (Self::Darwin, ForkCondition::Timestamp(1724227200)), + (Self::DarwinV2, ForkCondition::Timestamp(1725264000)), + (Self::Euclid, ForkCondition::Timestamp(1744815600)), + (Self::EuclidV2, ForkCondition::Timestamp(1745305200)), + (Self::Feynman, ForkCondition::Timestamp(1755576000)), + (Self::Galileo, ForkCondition::Timestamp(1765868400)), + (Self::GalileoV2, ForkCondition::Timestamp(1766041200)), + ] + } + + /// Scroll sepolia list of hardforks. 
+ pub const fn scroll_sepolia() -> [(Self, ForkCondition); 10] { + [ + (Self::Archimedes, ForkCondition::Block(0)), + (Self::Bernoulli, ForkCondition::Block(3747132)), + (Self::Curie, ForkCondition::Block(4740239)), + (Self::Darwin, ForkCondition::Timestamp(1723622400)), + (Self::DarwinV2, ForkCondition::Timestamp(1724832000)), + (Self::Euclid, ForkCondition::Timestamp(1741680000)), + (Self::EuclidV2, ForkCondition::Timestamp(1741852800)), + (Self::Feynman, ForkCondition::Timestamp(1753167600)), + (Self::Galileo, ForkCondition::Timestamp(1764054000)), + (Self::GalileoV2, ForkCondition::Timestamp(1764831600)), + ] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn check_scroll_hardfork_from_str() { + let hardfork_str = [ + "BernOulLi", + "CUrie", + "DaRwIn", + "DaRwInV2", + "EUcliD", + "eUClidv2", + "FEYnmaN", + "gaLiLEo", + "gaLiLEov2", + ]; + let expected_hardforks = [ + ScrollHardfork::Bernoulli, + ScrollHardfork::Curie, + ScrollHardfork::Darwin, + ScrollHardfork::DarwinV2, + ScrollHardfork::Euclid, + ScrollHardfork::EuclidV2, + ScrollHardfork::Feynman, + ScrollHardfork::Galileo, + ScrollHardfork::GalileoV2, + ]; + + let hardforks: Vec<ScrollHardfork> = + hardfork_str.iter().map(|h| ScrollHardfork::from_str(h).unwrap()).collect(); + + assert_eq!(hardforks, expected_hardforks); + } + + #[test] + fn check_nonexistent_hardfork_from_str() { + assert!(ScrollHardfork::from_str("not a hardfork").is_err()); + } +}
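A short sketch of reading the schedule above; `ForkCondition` comes from `alloy-hardforks` and both items are re-exported by the crate's `lib.rs` (next diff).

```rust
use scroll_alloy_hardforks::{ForkCondition, ScrollHardfork};

fn main() {
    // Look up Curie's activation condition in the mainnet schedule.
    let (_, curie) = ScrollHardfork::scroll_mainnet()
        .into_iter()
        .find(|(fork, _)| *fork == ScrollHardfork::Curie)
        .expect("Curie is in the mainnet schedule");

    // Curie is block-activated on mainnet; Darwin and later forks are timestamp-activated.
    assert_eq!(curie, ForkCondition::Block(7096836));
    assert!(curie.active_at_block(7_096_836));
}
```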
diff --git reth/crates/scroll/alloy/hardforks/src/lib.rs scroll-reth/crates/scroll/alloy/hardforks/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..6a19b2d6af8ae397e142e4fdd336190adc190fbe --- /dev/null +++ scroll-reth/crates/scroll/alloy/hardforks/src/lib.rs @@ -0,0 +1,116 @@ +//! Scroll-Reth hard forks. + +#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +extern crate alloc as std; + +pub use alloy_hardforks::ForkCondition; +use alloy_hardforks::{EthereumHardfork, EthereumHardforks}; +use std::vec::Vec; + +pub use hardfork::ScrollHardfork; +pub mod hardfork; + +/// Extends [`EthereumHardforks`] with scroll helper methods. +#[auto_impl::auto_impl(&, Arc)] +pub trait ScrollHardforks: EthereumHardforks { + /// Retrieves [`ForkCondition`] by an [`ScrollHardfork`]. If `fork` is not present, returns + /// [`ForkCondition::Never`]. + fn scroll_fork_activation(&self, fork: ScrollHardfork) -> ForkCondition; + + /// Convenience method to check if [`Bernoulli`](ScrollHardfork::Bernoulli) is active at a given + /// block number. + fn is_bernoulli_active_at_block(&self, block_number: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Bernoulli).active_at_block(block_number) + } + + /// Returns `true` if [`Curie`](ScrollHardfork::Curie) is active at given block block number. + fn is_curie_active_at_block(&self, block_number: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Curie).active_at_block(block_number) + } + + /// Returns `true` if [`Darwin`](ScrollHardfork::Darwin) is active at given block timestamp. + fn is_darwin_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Darwin).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`DarwinV2`](ScrollHardfork::DarwinV2) is active at given block timestamp. + fn is_darwin_v2_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::DarwinV2).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Euclid`](ScrollHardfork::Euclid) is active at given block timestamp. + fn is_euclid_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Euclid).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`EuclidV2`](ScrollHardfork::EuclidV2) is active at given block timestamp. + fn is_euclid_v2_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::EuclidV2).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Feynman`](ScrollHardfork::Feynman) is active at given block timestamp. + fn is_feynman_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Feynman).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Galileo`](ScrollHardfork::Galileo) is active at given block timestamp. + fn is_galileo_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::Galileo).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`GalileoV2`](ScrollHardfork::GalileoV2) is active at given block + /// timestamp. + fn is_galileo_v2_active_at_timestamp(&self, timestamp: u64) -> bool { + self.scroll_fork_activation(ScrollHardfork::GalileoV2).active_at_timestamp(timestamp) + } +} + +/// A type allowing to configure activation [`ForkCondition`]s for a given list of +/// [`ScrollHardfork`]s. +#[derive(Debug, Clone)] +pub struct ScrollChainHardforks { + /// Scroll hardfork activations. 
+ forks: Vec<(ScrollHardfork, ForkCondition)>, +} + +impl ScrollChainHardforks { + /// Creates a new [`ScrollChainHardforks`] with the given list of forks. + pub fn new(forks: impl IntoIterator<Item = (ScrollHardfork, ForkCondition)>) -> Self { + let mut forks = forks.into_iter().collect::<Vec<_>>(); + forks.sort(); + Self { forks } + } + + /// Creates a new [`ScrollChainHardforks`] with Scroll mainnet configuration. + pub fn scroll_mainnet() -> Self { + Self::new(ScrollHardfork::scroll_mainnet()) + } + + /// Creates a new [`ScrollChainHardforks`] with Scroll Sepolia configuration. + pub fn scroll_sepolia() -> Self { + Self::new(ScrollHardfork::scroll_sepolia()) + } +} + +impl EthereumHardforks for ScrollChainHardforks { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + if fork < EthereumHardfork::ArrowGlacier { + ForkCondition::Block(0) + } else if fork <= EthereumHardfork::Shanghai { + self.scroll_fork_activation(ScrollHardfork::Bernoulli) + } else { + ForkCondition::Never + } + } +} + +impl ScrollHardforks for ScrollChainHardforks { + fn scroll_fork_activation(&self, fork: ScrollHardfork) -> ForkCondition { + let Ok(idx) = self.forks.binary_search_by(|(f, _)| f.cmp(&fork)) else { + return ForkCondition::Never; + }; + + self.forks[idx].1 + } +}
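A minimal sketch of querying activations through the `ScrollHardforks` helper methods, using the mainnet schedule from `hardfork.rs`:

```rust
use scroll_alloy_hardforks::{ScrollChainHardforks, ScrollHardforks};

fn main() {
    let forks = ScrollChainHardforks::scroll_mainnet();

    // Block-activated fork: Curie goes live at block 7_096_836 on mainnet.
    assert!(!forks.is_curie_active_at_block(7_096_835));
    assert!(forks.is_curie_active_at_block(7_096_836));

    // Timestamp-activated forks.
    assert!(forks.is_darwin_active_at_timestamp(1_724_227_200));
    assert!(!forks.is_galileo_v2_active_at_timestamp(0));
}
```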
diff --git reth/crates/scroll/alloy/network/Cargo.toml scroll-reth/crates/scroll/alloy/network/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..24449a64ec846f1f7984597a68bbe66f0ecb9a4a --- /dev/null +++ scroll-reth/crates/scroll/alloy/network/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "scroll-alloy-network" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +alloy-provider = { workspace = true, default-features = false } + +scroll-alloy-consensus = { workspace = true, default-features = false } +scroll-alloy-rpc-types = { workspace = true, default-features = false } + +# alloy +alloy-consensus = { workspace = true, default-features = false } +alloy-network = { workspace = true, default-features = false } +alloy-primitives = { workspace = true, default-features = false } +alloy-rpc-types-eth = { workspace = true, default-features = false } +alloy-signer = { workspace = true, default-features = false } + +[features] +std = [ + "alloy-consensus/std", + "alloy-primitives/std", + "alloy-rpc-types-eth/std", + "scroll-alloy-consensus/std", + "scroll-alloy-rpc-types/std", +]
diff --git reth/crates/scroll/alloy/network/README.md scroll-reth/crates/scroll/alloy/network/README.md new file mode 100644 index 0000000000000000000000000000000000000000..da1f2fd66158a70e84a5a764eb413aea6be31f8f --- /dev/null +++ scroll-reth/crates/scroll/alloy/network/README.md @@ -0,0 +1,8 @@ +# scroll-alloy-network + +Scroll blockchain RPC behavior abstraction. + +This crate contains a simple abstraction of the RPC behavior of a +Scroll blockchain. It is intended to be used by the Alloy client to +provide a consistent interface to the rest of the library, regardless of +changes the underlying blockchain makes to the RPC interface.
diff --git reth/crates/scroll/alloy/network/src/lib.rs scroll-reth/crates/scroll/alloy/network/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa3525f668bbf48c930f84cf4b6c45754926ed43 --- /dev/null +++ scroll-reth/crates/scroll/alloy/network/src/lib.rs @@ -0,0 +1,245 @@ +#![doc = include_str!("../README.md")] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +use alloy_consensus::{TxEnvelope, TxType, TypedTransaction}; +pub use alloy_network::*; +use alloy_primitives::{Address, Bytes, ChainId, TxKind, U256}; +use alloy_provider::fillers::{ + ChainIdFiller, GasFiller, JoinFill, NonceFiller, RecommendedFillers, +}; +use alloy_rpc_types_eth::AccessList; +use scroll_alloy_consensus::{self, ScrollTxEnvelope, ScrollTxType, ScrollTypedTransaction}; +use scroll_alloy_rpc_types::ScrollTransactionRequest; + +/// Types for a Scroll-stack network. +#[derive(Clone, Copy, Debug)] +pub struct Scroll { + _private: (), +} + +impl Network for Scroll { + type TxType = ScrollTxType; + + type TxEnvelope = scroll_alloy_consensus::ScrollTxEnvelope; + + type UnsignedTx = scroll_alloy_consensus::ScrollTypedTransaction; + + type ReceiptEnvelope = scroll_alloy_consensus::ScrollReceiptEnvelope; + + type Header = alloy_consensus::Header; + + type TransactionRequest = scroll_alloy_rpc_types::ScrollTransactionRequest; + + type TransactionResponse = scroll_alloy_rpc_types::Transaction; + + type ReceiptResponse = scroll_alloy_rpc_types::ScrollTransactionReceipt; + + type HeaderResponse = alloy_rpc_types_eth::Header; + + type BlockResponse = + alloy_rpc_types_eth::Block<Self::TransactionResponse, Self::HeaderResponse>; +} + +impl TransactionBuilder<Scroll> for ScrollTransactionRequest { + fn chain_id(&self) -> Option<ChainId> { + self.as_ref().chain_id() + } + + fn set_chain_id(&mut self, chain_id: ChainId) { + self.as_mut().set_chain_id(chain_id); + } + + fn nonce(&self) -> Option<u64> { + self.as_ref().nonce() + } + + fn set_nonce(&mut self, nonce: u64) { + self.as_mut().set_nonce(nonce); + } + + fn take_nonce(&mut self) -> Option<u64> { + self.as_mut().nonce.take() + } + + fn input(&self) -> Option<&Bytes> { + self.as_ref().input() + } + + fn set_input<T: Into<Bytes>>(&mut self, input: T) { + self.as_mut().set_input(input); + } + + fn from(&self) -> Option<Address> { + self.as_ref().from() + } + + fn set_from(&mut self, from: Address) { + self.as_mut().set_from(from); + } + + fn kind(&self) -> Option<TxKind> { + self.as_ref().kind() + } + + fn clear_kind(&mut self) { + self.as_mut().clear_kind(); + } + + fn set_kind(&mut self, kind: TxKind) { + self.as_mut().set_kind(kind); + } + + fn value(&self) -> Option<U256> { + self.as_ref().value() + } + + fn set_value(&mut self, value: U256) { + self.as_mut().set_value(value); + } + + fn gas_price(&self) -> Option<u128> { + self.as_ref().gas_price() + } + + fn set_gas_price(&mut self, gas_price: u128) { + self.as_mut().set_gas_price(gas_price); + } + + fn max_fee_per_gas(&self) -> Option<u128> { + self.as_ref().max_fee_per_gas() + } + + fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) { + self.as_mut().set_max_fee_per_gas(max_fee_per_gas); + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + self.as_ref().max_priority_fee_per_gas() + } + + fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) { + self.as_mut().set_max_priority_fee_per_gas(max_priority_fee_per_gas); + } + + fn gas_limit(&self) -> Option<u64> { + self.as_ref().gas_limit() + } + + fn 
set_gas_limit(&mut self, gas_limit: u64) { + self.as_mut().set_gas_limit(gas_limit); + } + + fn access_list(&self) -> Option<&AccessList> { + self.as_ref().access_list() + } + + fn set_access_list(&mut self, access_list: AccessList) { + self.as_mut().set_access_list(access_list); + } + + fn complete_type(&self, ty: ScrollTxType) -> Result<(), Vec<&'static str>> { + match ty { + ScrollTxType::L1Message => Err(vec!["not implemented for L1 message tx"]), + _ => { + let ty = TxType::try_from(ty as u8).unwrap(); + self.as_ref().complete_type(ty) + } + } + } + + fn can_submit(&self) -> bool { + self.as_ref().can_submit() + } + + fn can_build(&self) -> bool { + self.as_ref().can_build() + } + + #[doc(alias = "output_transaction_type")] + fn output_tx_type(&self) -> ScrollTxType { + match self.as_ref().preferred_type() { + TxType::Eip1559 | TxType::Eip4844 => ScrollTxType::Eip1559, + TxType::Eip2930 => ScrollTxType::Eip2930, + TxType::Legacy => ScrollTxType::Legacy, + TxType::Eip7702 => ScrollTxType::Eip7702, + } + } + + #[doc(alias = "output_transaction_type_checked")] + fn output_tx_type_checked(&self) -> Option<ScrollTxType> { + self.as_ref().buildable_type().map(|tx_ty| match tx_ty { + TxType::Eip1559 | TxType::Eip4844 => ScrollTxType::Eip1559, + TxType::Eip2930 => ScrollTxType::Eip2930, + TxType::Legacy => ScrollTxType::Legacy, + TxType::Eip7702 => ScrollTxType::Eip7702, + }) + } + + fn prep_for_submission(&mut self) { + self.as_mut().prep_for_submission(); + } + + fn build_unsigned(self) -> BuildResult<ScrollTypedTransaction, Scroll> { + if let Err((tx_type, missing)) = self.as_ref().missing_keys() { + let tx_type = ScrollTxType::try_from(tx_type as u8).unwrap(); + return Err(TransactionBuilderError::InvalidTransactionRequest(tx_type, missing) + .into_unbuilt(self)); + } + Ok(self.build_typed_tx().expect("checked by missing_keys")) + } + + async fn build<W: NetworkWallet<Scroll>>( + self, + wallet: &W, + ) -> Result<<Scroll as Network>::TxEnvelope, TransactionBuilderError<Scroll>> { + Ok(wallet.sign_request(self).await?) 
+ } +} + +impl NetworkWallet<Scroll> for EthereumWallet { + fn default_signer_address(&self) -> Address { + NetworkWallet::<Ethereum>::default_signer_address(self) + } + + fn has_signer_for(&self, address: &Address) -> bool { + NetworkWallet::<Ethereum>::has_signer_for(self, address) + } + + fn signer_addresses(&self) -> impl Iterator<Item = Address> { + NetworkWallet::<Ethereum>::signer_addresses(self) + } + + async fn sign_transaction_from( + &self, + sender: Address, + tx: ScrollTypedTransaction, + ) -> alloy_signer::Result<ScrollTxEnvelope> { + let tx = match tx { + ScrollTypedTransaction::Legacy(tx) => TypedTransaction::Legacy(tx), + ScrollTypedTransaction::Eip2930(tx) => TypedTransaction::Eip2930(tx), + ScrollTypedTransaction::Eip1559(tx) => TypedTransaction::Eip1559(tx), + ScrollTypedTransaction::Eip7702(tx) => TypedTransaction::Eip7702(tx), + ScrollTypedTransaction::L1Message(_) => { + return Err(alloy_signer::Error::other("not implemented for deposit tx")) + } + }; + let tx = NetworkWallet::<Ethereum>::sign_transaction_from(self, sender, tx).await?; + + Ok(match tx { + TxEnvelope::Eip1559(tx) => ScrollTxEnvelope::Eip1559(tx), + TxEnvelope::Eip2930(tx) => ScrollTxEnvelope::Eip2930(tx), + TxEnvelope::Eip7702(tx) => ScrollTxEnvelope::Eip7702(tx), + TxEnvelope::Legacy(tx) => ScrollTxEnvelope::Legacy(tx), + _ => unreachable!(), + }) + } +} + +impl RecommendedFillers for Scroll { + type RecommendedFillers = JoinFill<GasFiller, JoinFill<NonceFiller, ChainIdFiller>>; + + fn recommended_fillers() -> Self::RecommendedFillers { + Default::default() + } +}
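A hedged sketch of driving the builder impl above: populate a `ScrollTransactionRequest` (a `Default` impl is assumed, since the type wraps alloy's `TransactionRequest`) and let `output_tx_type` resolve the Scroll transaction type.

```rust
// Sketch only: `ScrollTransactionRequest::default()` and a derived `Debug` on
// `ScrollTxType` are assumptions; the builder methods come from `TransactionBuilder`.
use alloy_primitives::{address, U256};
use scroll_alloy_network::{Scroll, TransactionBuilder};
use scroll_alloy_rpc_types::ScrollTransactionRequest;

fn main() {
    let request = ScrollTransactionRequest::default()
        .with_to(address!("0000000000000000000000000000000000000001"))
        .with_value(U256::from(1u64))
        .with_gas_limit(21_000)
        .with_max_fee_per_gas(1_000_000_000)
        .with_max_priority_fee_per_gas(100_000_000)
        .with_chain_id(534_352); // Scroll mainnet

    // Both EIP-1559 fee fields are set, so the preferred type resolves to EIP-1559.
    let ty = TransactionBuilder::<Scroll>::output_tx_type(&request);
    println!("preferred tx type: {ty:?}");
}
```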
diff --git reth/crates/scroll/alloy/provider/Cargo.toml scroll-reth/crates/scroll/alloy/provider/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9e41595983bee7818c0220009100c4c593833088 --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "scroll-alloy-provider" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-provider.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +alloy-rpc-client.workspace = true +alloy-transport.workspace = true +alloy-transport-http = { workspace = true, features = ["jwt-auth"] } + +# scroll +scroll-alloy-network.workspace = true +scroll-alloy-rpc-types-engine = { workspace = true, features = ["serde"] } + +# reth +reth-rpc-api = { workspace = true, features = ["client"] } + +# reth-scroll +reth-scroll-engine-primitives.workspace = true + +# misc +auto_impl.workspace = true +async-trait.workspace = true +derive_more.workspace = true +eyre.workspace = true +http-body-util.workspace = true +reqwest.workspace = true +tower.workspace = true +thiserror.workspace = true +jsonrpsee.workspace = true + +[dev-dependencies] +reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-engine-primitives.workspace = true +reth-payload-primitives.workspace = true +reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-rpc-builder.workspace = true +reth-rpc-engine-api.workspace = true +reth-scroll-engine-primitives.workspace = true +reth-scroll-node.workspace = true +reth-scroll-payload = { workspace = true, features = ["test-utils"] } +reth-scroll-chainspec.workspace = true +reth-tasks.workspace = true +reth-tracing.workspace = true +reth-transaction-pool.workspace = true + +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } +futures-util.workspace = true + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-rpc-types-engine/std", + "scroll-alloy-rpc-types-engine/std", + "derive_more/std", + "reth-engine-primitives/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "futures-util/std", + "reth-scroll-chainspec/std", + "thiserror/std", + "scroll-alloy-network/std", +]
diff --git reth/crates/scroll/alloy/provider/src/engine/client.rs scroll-reth/crates/scroll/alloy/provider/src/engine/client.rs new file mode 100644 index 0000000000000000000000000000000000000000..145b833815c7bdd4ab9874f7dcdfaf461218375c --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/src/engine/client.rs @@ -0,0 +1,79 @@ +use super::{ScrollEngineApi, ScrollEngineApiResult}; +use alloy_primitives::{BlockHash, U64}; +use alloy_rpc_types_engine::{ + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadV1, ForkchoiceState, + ForkchoiceUpdated, PayloadId, PayloadStatus, +}; + +use reth_rpc_api::EngineApiClient; +use reth_scroll_engine_primitives::ScrollEngineTypes; +use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; + +/// A client for a type that implements the [`EngineApiClient`] trait. +#[derive(Debug)] +pub struct ScrollAuthApiEngineClient<T> { + client: T, +} + +impl<T> ScrollAuthApiEngineClient<T> { + /// Creates a new [`ScrollAuthApiEngineClient`] with the given client. + pub const fn new(client: T) -> Self { + Self { client } + } +} + +#[async_trait::async_trait] +impl<EC: EngineApiClient<ScrollEngineTypes> + Sync> ScrollEngineApi + for ScrollAuthApiEngineClient<EC> +{ + async fn new_payload_v1( + &self, + payload: ExecutionPayloadV1, + ) -> ScrollEngineApiResult<PayloadStatus> { + Ok(self.client.new_payload_v1(payload).await?) + } + + async fn fork_choice_updated_v1( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option<ScrollPayloadAttributes>, + ) -> ScrollEngineApiResult<ForkchoiceUpdated> { + Ok(self.client.fork_choice_updated_v1(fork_choice_state, payload_attributes).await?) + } + + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> ScrollEngineApiResult<ExecutionPayloadV1> { + Ok(self.client.get_payload_v1(payload_id).await?) + } + + async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec<BlockHash>, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1> { + Ok(self.client.get_payload_bodies_by_hash_v1(block_hashes).await?) + } + + async fn get_payload_bodies_by_range_v1( + &self, + start: U64, + count: U64, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1> { + Ok(self.client.get_payload_bodies_by_range_v1(start, count).await?) + } + + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> ScrollEngineApiResult<Vec<ClientVersionV1>> { + Ok(self.client.get_client_version_v1(client_version).await?) + } + + async fn exchange_capabilities( + &self, + capabilities: Vec<String>, + ) -> ScrollEngineApiResult<Vec<String>> { + Ok(self.client.exchange_capabilities(capabilities).await?) + } +}
diff --git reth/crates/scroll/alloy/provider/src/engine/mod.rs scroll-reth/crates/scroll/alloy/provider/src/engine/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..466489f9af680a7487581f921deaa31a9f825373 --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/src/engine/mod.rs @@ -0,0 +1,105 @@ +mod provider; + +pub use provider::ScrollAuthEngineApiProvider; + +mod client; +pub use client::ScrollAuthApiEngineClient; + +use super::error::ScrollEngineApiError; +use alloy_primitives::{BlockHash, U64}; +use alloy_rpc_types_engine::{ + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadV1, ForkchoiceState, + ForkchoiceUpdated, PayloadId, PayloadStatus, +}; + +use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; + +/// A type alias for the result of the Scroll Engine API methods. +pub type ScrollEngineApiResult<T> = Result<T, ScrollEngineApiError>; + +/// Engine API trait for Scroll. Only exposes versions of the API that are supported. +/// Note: +/// > The provider should use a JWT authentication layer. +#[async_trait::async_trait] +#[auto_impl::auto_impl(Arc, &, Box)] +pub trait ScrollEngineApi { + /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_newpayloadv1> + /// Caution: This should not accept the `withdrawals` field + async fn new_payload_v1( + &self, + payload: ExecutionPayloadV1, + ) -> ScrollEngineApiResult<PayloadStatus>; + + /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_forkchoiceupdatedv1> + /// Caution: This should not accept the `withdrawals` field in the payload attributes. + /// + /// Modifications: + /// - Adds the below fields to the `payload_attributes`: + /// - transactions: an optional list of transactions to include at the start of the block. + /// - `no_tx_pool`: a boolean which signals whether pool transactions need to be included in + /// the payload building task. + async fn fork_choice_updated_v1( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option<ScrollPayloadAttributes>, + ) -> ScrollEngineApiResult<ForkchoiceUpdated>; + + /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_getpayloadv1> + /// + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// Caution: This should not return the `withdrawals` field + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> ScrollEngineApiResult<ExecutionPayloadV1>; + + /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyhashv1> + async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec<BlockHash>, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1>; + + /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyrangev1> + /// + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. 
+ /// + /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus + /// layer p2p specification, meaning the input should be treated as untrusted or potentially + /// adversarial. + /// + /// Implementers should take care when acting on the input to this method, specifically + /// ensuring that the range is limited properly, and that the range boundaries are computed + /// correctly and without panics. + async fn get_payload_bodies_by_range_v1( + &self, + start: U64, + count: U64, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1>; + + /// This function will return the `ClientVersionV1` object. + /// See also: + /// <https://github.com/ethereum/execution-apis/blob/03911ffc053b8b806123f1fc237184b0092a485a/src/engine/identification.md#engine_getclientversionv1> + /// + /// - When connected to a single execution client, the consensus client **MUST** receive an + /// array with a single `ClientVersionV1` object. + /// - When connected to multiple execution clients via a multiplexer, the multiplexer **MUST** + /// concatenate the responses from each execution client into a single, + /// flat array before returning the response to the consensus client. + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> ScrollEngineApiResult<Vec<ClientVersionV1>>; + + /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/common.md#capabilities> + async fn exchange_capabilities( + &self, + capabilities: Vec<String>, + ) -> ScrollEngineApiResult<Vec<String>>; +}
diff --git reth/crates/scroll/alloy/provider/src/engine/provider.rs scroll-reth/crates/scroll/alloy/provider/src/engine/provider.rs new file mode 100644 index 0000000000000000000000000000000000000000..8766068a29bb633066001b1ba99cac8470d71566 --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/src/engine/provider.rs @@ -0,0 +1,218 @@ +use super::{ScrollEngineApi, ScrollEngineApiResult}; +use alloy_primitives::{bytes::Bytes, BlockHash, U64}; +use alloy_provider::{Provider, RootProvider}; +use alloy_rpc_client::RpcClient; +use alloy_rpc_types_engine::{ + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadV1, ForkchoiceState, + ForkchoiceUpdated, JwtSecret, PayloadId, PayloadStatus, +}; +use alloy_transport::utils::guess_local_url; +use alloy_transport_http::{ + hyper_util, hyper_util::rt::TokioExecutor, AuthLayer, Http, HyperClient, +}; +use http_body_util::Full; +use reqwest::Url; +use scroll_alloy_network::Scroll; +use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; +use std::fmt::Debug; + +/// An authenticated [`alloy_provider::Provider`] to the [`ScrollEngineApi`]. +#[derive(Clone)] +pub struct ScrollAuthEngineApiProvider<P> { + provider: P, +} + +impl<P> Debug for ScrollAuthEngineApiProvider<P> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ScrollAuthEngineApiProvider").field("provider", &"provider").finish() + } +} + +impl ScrollAuthEngineApiProvider<RootProvider<Scroll>> { + /// Returns a new [`ScrollAuthEngineApiProvider`], authenticated for interfacing with the Engine + /// API server at the provided URL using the passed JWT secret. + pub fn new(jwt_secret: JwtSecret, url: Url) -> Self { + let auth_layer = AuthLayer::new(jwt_secret); + let hyper_client = hyper_util::client::legacy::Client::builder(TokioExecutor::new()) + .build_http::<Full<Bytes>>(); + + let service = tower::ServiceBuilder::new().layer(auth_layer).service(hyper_client); + let transport = HyperClient::<Full<Bytes>, _>::with_service(service); + + let is_url_local = guess_local_url(&url); + let http = Http::with_client(transport, url); + let client = RpcClient::new(http, is_url_local); + + let provider = RootProvider::<Scroll>::new(client); + Self { provider } + } +} + +impl<P> ScrollAuthEngineApiProvider<P> { + /// Returns a new [`ScrollAuthEngineApiProvider`] from the given provider. + pub const fn from_provider(provider: P) -> Self { + Self { provider } + } +} + +#[async_trait::async_trait] +impl<P: Provider<Scroll>> ScrollEngineApi for ScrollAuthEngineApiProvider<P> { + async fn new_payload_v1( + &self, + payload: ExecutionPayloadV1, + ) -> ScrollEngineApiResult<PayloadStatus> { + Ok(self.provider.client().request("engine_newPayloadV1", (payload,)).await?) + } + + async fn fork_choice_updated_v1( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option<ScrollPayloadAttributes>, + ) -> ScrollEngineApiResult<ForkchoiceUpdated> { + Ok(self + .provider + .client() + .request("engine_forkchoiceUpdatedV1", (fork_choice_state, payload_attributes)) + .await?) + } + + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> ScrollEngineApiResult<ExecutionPayloadV1> { + Ok(self.provider.client().request("engine_getPayloadV1", (payload_id,)).await?) + } + + async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec<BlockHash>, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1> { + Ok(self + .provider + .client() + .request("engine_getPayloadBodiesByHashV1", (block_hashes,)) + .await?) 
+ } + + async fn get_payload_bodies_by_range_v1( + &self, + start: U64, + count: U64, + ) -> ScrollEngineApiResult<ExecutionPayloadBodiesV1> { + Ok(self + .provider + .client() + .request("engine_getPayloadBodiesByRangeV1", (start, count)) + .await?) + } + + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> ScrollEngineApiResult<Vec<ClientVersionV1>> { + Ok(self.provider.client().request("engine_getClientVersionV1", (client_version,)).await?) + } + + async fn exchange_capabilities( + &self, + capabilities: Vec<String>, + ) -> ScrollEngineApiResult<Vec<String>> { + Ok(self.provider.client().request("engine_exchangeCapabilities", (capabilities,)).await?) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::engine::ScrollEngineApi; + use alloy_primitives::U64; + use alloy_rpc_types_engine::{ + ClientCode, ClientVersionV1, ExecutionPayloadV1, ForkchoiceState, PayloadId, + }; + use reth_engine_primitives::ConsensusEngineHandle; + use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; + use reth_payload_primitives::PayloadTypes; + use reth_primitives::{Block, TransactionSigned}; + use reth_primitives_traits::block::Block as _; + use reth_provider::{test_utils::NoopProvider, CanonStateNotification}; + use reth_rpc_builder::auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle}; + use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; + use reth_scroll_chainspec::SCROLL_MAINNET; + use reth_scroll_engine_primitives::{ + ScrollBuiltPayload, ScrollEngineTypes, ScrollPayloadBuilderAttributes, + }; + use reth_scroll_node::ScrollEngineValidator; + use reth_scroll_payload::NoopPayloadJobGenerator; + use reth_tasks::TokioTaskExecutor; + use reth_transaction_pool::noop::NoopTransactionPool; + use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; + use tokio::sync::mpsc::unbounded_channel; + + fn spawn_test_payload_service<T>() -> PayloadBuilderHandle<T> + where + T: PayloadTypes< + PayloadBuilderAttributes = ScrollPayloadBuilderAttributes, + BuiltPayload = ScrollBuiltPayload, + > + 'static, + { + let (service, handle) = PayloadBuilderService::< + NoopPayloadJobGenerator<ScrollPayloadBuilderAttributes, ScrollBuiltPayload>, + futures_util::stream::Empty<CanonStateNotification>, + T, + >::new(Default::default(), futures_util::stream::empty()); + tokio::spawn(service); + handle + } + + async fn launch_auth(jwt_secret: JwtSecret) -> AuthServerHandle { + let config = AuthServerConfig::builder(jwt_secret) + .socket_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0))) + .build(); + let (tx, _rx) = unbounded_channel(); + let beacon_engine_handle = ConsensusEngineHandle::<ScrollEngineTypes>::new(tx); + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + + let engine_api = EngineApi::new( + NoopProvider::default(), + SCROLL_MAINNET.clone(), + beacon_engine_handle, + spawn_test_payload_service().into(), + NoopTransactionPool::default(), + Box::<TokioTaskExecutor>::default(), + client, + EngineCapabilities::default(), + ScrollEngineValidator::new(SCROLL_MAINNET.clone()), + false, + ); + let module = AuthRpcModule::new(engine_api); + module.start_server(config).await.unwrap() + } + + #[allow(unused_must_use)] + #[tokio::test(flavor = "multi_thread")] + async fn test_engine_api_provider() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let secret = JwtSecret::random(); + let handle = 
launch_auth(secret).await; + let url = handle.http_url().parse()?; + let provider = ScrollAuthEngineApiProvider::new(secret, url); + + let block = Block::<TransactionSigned>::default().seal_slow(); + let execution_payload = + ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.clone().into_block()); + provider.new_payload_v1(execution_payload).await; + provider.fork_choice_updated_v1(ForkchoiceState::default(), None).await; + provider.get_payload_v1(PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await; + provider.get_payload_bodies_by_hash_v1(vec![]).await; + provider.get_payload_bodies_by_range_v1(U64::ZERO, U64::from(1u64)).await; + provider.exchange_capabilities(vec![]).await; + + Ok(()) + } +}
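Outside the test above, wiring the provider looks roughly like the sketch below; the endpoint URL is illustrative and the JWT secret would normally be read from the node's secret file.

```rust
use alloy_rpc_types_engine::{ForkchoiceState, JwtSecret};
use scroll_alloy_provider::{ScrollAuthEngineApiProvider, ScrollEngineApi};

#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Illustrative values; a real setup shares the secret with the node's auth server.
    let secret = JwtSecret::random();
    let url = "http://127.0.0.1:8551".parse()?;
    let provider = ScrollAuthEngineApiProvider::new(secret, url);

    // Advance the fork choice without starting a payload build (attributes are `None`).
    let updated = provider.fork_choice_updated_v1(ForkchoiceState::default(), None).await?;
    println!("payload status: {:?}", updated.payload_status.status);

    Ok(())
}
```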
diff --git reth/crates/scroll/alloy/provider/src/error.rs scroll-reth/crates/scroll/alloy/provider/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..88cfc1eb7af1b9a04c63935492e3d0bed790c3d5 --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/src/error.rs @@ -0,0 +1,10 @@ +/// The error type for the Scroll engine API. +#[derive(thiserror::Error, Debug)] +pub enum ScrollEngineApiError { + /// Error when decoding a response from a jsonrpsee client. + #[error("Jsonrpsee error: {0}")] + JsonRpseeError(#[from] jsonrpsee::core::ClientError), + /// Error when decoding a response from an alloy client. + #[error("Alloy error: {0}")] + AlloyError(#[from] alloy_transport::TransportError), +}
diff --git reth/crates/scroll/alloy/provider/src/lib.rs scroll-reth/crates/scroll/alloy/provider/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..812b635562098ab12bbead82f1a5a6bd1a1646f9 --- /dev/null +++ scroll-reth/crates/scroll/alloy/provider/src/lib.rs @@ -0,0 +1,9 @@ +//! Provider implementations fitted to Scroll's needs. + +mod engine; +pub use engine::{ + ScrollAuthApiEngineClient, ScrollAuthEngineApiProvider, ScrollEngineApi, ScrollEngineApiResult, +}; + +mod error; +pub use error::ScrollEngineApiError;
diff --git reth/crates/scroll/alloy/rpc-types-engine/Cargo.toml scroll-reth/crates/scroll/alloy/rpc-types-engine/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..41dd51a6d0921a79e818a804fae2135614498d96 --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types-engine/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "scroll-alloy-rpc-types-engine" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true + +# misc +serde = { workspace = true, optional = true } + +# test-utils +arbitrary = { workspace = true, optional = true } + +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["getrandom"] } +serde_json.workspace = true + +[features] +default = ["std"] +arbitrary = [ + "dep:arbitrary", + "alloy-primitives/arbitrary", + "alloy-rpc-types-engine/arbitrary", +] +std = [ + "alloy-primitives/std", + "alloy-rpc-types-engine/std", + "serde?/std", + "serde_json/std", +] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "alloy-rpc-types-engine/serde", +]
diff --git reth/crates/scroll/alloy/rpc-types-engine/src/attributes.rs scroll-reth/crates/scroll/alloy/rpc-types-engine/src/attributes.rs new file mode 100644 index 0000000000000000000000000000000000000000..25b522ac0315680c385e5e6be7cc7b1c1b2ab0cf --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types-engine/src/attributes.rs @@ -0,0 +1,123 @@ +//! Scroll-specific payload attributes. + +use alloc::vec::Vec; +use alloy_primitives::{Address, Bytes, B256, U256}; +use alloy_rpc_types_engine::PayloadAttributes; + +/// The payload attributes for block building tailored for Scroll. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct ScrollPayloadAttributes { + /// The payload attributes. + pub payload_attributes: PayloadAttributes, + /// An optional array of transactions to be force-included in the block (includes L1 messages). + pub transactions: Option<Vec<Bytes>>, + /// Indicates whether the payload building job should happen with or without pool transactions. + pub no_tx_pool: bool, + /// The block data hint, used pre-Euclid by the block builder to derive the correct block + /// hash and post-Euclid by the sequencer to set the difficulty of the block. + pub block_data_hint: BlockDataHint, + /// The gas limit for the block building task. + pub gas_limit: Option<u64>, +} + +/// Block data provided as a hint to the payload attributes. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct BlockDataHint { + /// The optional extra data for the block. + pub extra_data: Option<Bytes>, + /// The optional state root for the block. + pub state_root: Option<B256>, + /// The optional coinbase for the block. + pub coinbase: Option<Address>, + /// The optional nonce for the block. + pub nonce: Option<u64>, + /// The optional difficulty for the block. + pub difficulty: Option<U256>, +} + +impl BlockDataHint { + /// Returns an empty [`BlockDataHint`] with all fields set to `None`. + pub fn none() -> Self { + Self::default() + } + + /// Returns `true` if the [`BlockDataHint`] is empty. 
+ pub const fn is_empty(&self) -> bool { + self.extra_data.is_none() && + self.state_root.is_none() && + self.coinbase.is_none() && + self.nonce.is_none() && + self.difficulty.is_none() + } +} + +#[cfg(feature = "arbitrary")] +impl<'a> arbitrary::Arbitrary<'a> for BlockDataHint { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + Ok(Self { + extra_data: Some(Bytes::arbitrary(u)?), + state_root: Some(B256::arbitrary(u)?), + coinbase: Some(Address::arbitrary(u)?), + nonce: Some(u64::arbitrary(u)?), + difficulty: Some(U256::arbitrary(u)?), + }) + } +} + +#[cfg(feature = "arbitrary")] +impl<'a> arbitrary::Arbitrary<'a> for ScrollPayloadAttributes { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { + Ok(Self { + payload_attributes: PayloadAttributes { + timestamp: u64::arbitrary(u)?, + prev_randao: alloy_primitives::B256::arbitrary(u)?, + suggested_fee_recipient: alloy_primitives::Address::arbitrary(u)?, + withdrawals: None, + parent_beacon_block_root: Some(alloy_primitives::B256::arbitrary(u)?), + }, + transactions: Some(Vec::arbitrary(u)?), + no_tx_pool: bool::arbitrary(u)?, + block_data_hint: BlockDataHint::arbitrary(u)?, + gas_limit: Some(u64::arbitrary(u)?), + }) + } +} + +#[cfg(all(test, feature = "serde"))] +mod test { + use super::*; + use alloy_primitives::{Address, B256}; + use alloy_rpc_types_engine::PayloadAttributes; + + #[test] + fn test_serde_roundtrip_attributes() { + let attributes = ScrollPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 0x1337, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Default::default(), + parent_beacon_block_root: Some(B256::ZERO), + }, + transactions: Some(vec![b"hello".to_vec().into()]), + no_tx_pool: true, + block_data_hint: BlockDataHint { + extra_data: Some(b"world".into()), + state_root: Some(B256::random()), + coinbase: Some(Address::random()), + nonce: Some(0x12345), + difficulty: Some(U256::from(10)), + }, + gas_limit: Some(10_000_000), + }; + + let ser = serde_json::to_string(&attributes).unwrap(); + let de: ScrollPayloadAttributes = serde_json::from_str(&ser).unwrap(); + + assert_eq!(attributes, de); + } +}
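With the `serde` feature enabled, building the attributes by hand shows the camelCase wire format the engine endpoint expects; the values below are illustrative.

```rust
use alloy_rpc_types_engine::PayloadAttributes;
use scroll_alloy_rpc_types_engine::{BlockDataHint, ScrollPayloadAttributes};

fn main() {
    // Skip the tx pool and cap the block gas limit; leave the block data hint empty.
    let attributes = ScrollPayloadAttributes {
        payload_attributes: PayloadAttributes {
            timestamp: 1_700_000_000,
            prev_randao: Default::default(),
            suggested_fee_recipient: Default::default(),
            withdrawals: None,
            parent_beacon_block_root: None,
        },
        transactions: None,
        no_tx_pool: true,
        block_data_hint: BlockDataHint::none(),
        gas_limit: Some(10_000_000),
    };

    assert!(attributes.block_data_hint.is_empty());
    // Serializes with camelCase keys, e.g. `noTxPool`, `blockDataHint`, `gasLimit`.
    println!("{}", serde_json::to_string_pretty(&attributes).unwrap());
}
```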
diff --git reth/crates/scroll/alloy/rpc-types-engine/src/lib.rs scroll-reth/crates/scroll/alloy/rpc-types-engine/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..fcf62c269351c5e3dfe1ffa10ac66cc5fff95f6d --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types-engine/src/lib.rs @@ -0,0 +1,8 @@ +//! Scroll types for interaction with the Engine API via RPC. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod attributes; +pub use attributes::{BlockDataHint, ScrollPayloadAttributes}; + +extern crate alloc;
diff --git reth/crates/scroll/alloy/rpc-types/Cargo.toml scroll-reth/crates/scroll/alloy/rpc-types/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ceb402689dfbbedaf1f23ad06d36d309ff1916af --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "scroll-alloy-rpc-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +scroll-alloy-consensus = { workspace = true, features = ["serde"] } +alloy-consensus = { workspace = true, default-features = false } +alloy-eips = { workspace = true, features = ["serde"], default-features = false } +alloy-network-primitives = { workspace = true, default-features = false } +alloy-primitives = { workspace = true, features = ["map", "rlp", "serde"], default-features = false } +alloy-rpc-types-eth = { workspace = true, features = ["serde"], default-features = false } +alloy-serde = { workspace = true, default-features = false } +arbitrary = { workspace = true, features = ["derive", "derive"], optional = true } +derive_more = { workspace = true, default-features = false } +serde = { workspace = true, features = ["derive", "alloc", "derive"], default-features = false } +serde_json = { workspace = true, features = ["alloc"], default-features = false } + +[dev-dependencies] +similar-asserts = "1.6" +alloy-consensus = { workspace = true, features = ["arbitrary"], default-features = false } +alloy-primitives = { workspace = true, features = ["arbitrary"], default-features = false } +alloy-rpc-types-eth = { workspace = true, features = ["arbitrary"], default-features = false } +arbitrary = { workspace = true, features = ["derive", "derive"] } + +[features] +arbitrary = [ + "std", + "dep:arbitrary", + "alloy-primitives/arbitrary", + "alloy-rpc-types-eth/arbitrary", + "scroll-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-serde/arbitrary", +] +default = ["std"] +k256 = [ + "alloy-rpc-types-eth/k256", + "scroll-alloy-consensus/k256", +] +std = [ + "alloy-network-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-rpc-types-eth/std", + "alloy-consensus/std", + "alloy-serde/std", + "derive_more/std", + "serde/std", + "serde_json/std", + "scroll-alloy-consensus/std", +]
diff --git reth/crates/scroll/alloy/rpc-types/README.md scroll-reth/crates/scroll/alloy/rpc-types/README.md new file mode 100644 index 0000000000000000000000000000000000000000..963a99c4b23daddceeb49feb55a579509ed91ac1 --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/README.md @@ -0,0 +1,3 @@ +# scroll-alloy-rpc-types + +Scroll RPC-related types.
diff --git reth/crates/scroll/alloy/rpc-types/src/lib.rs scroll-reth/crates/scroll/alloy/rpc-types/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b79eb4da689f5e3a4b1589afbee085f0eb6886d3 --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/src/lib.rs @@ -0,0 +1,14 @@ +#![doc = include_str!("../README.md")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/alloy-rs/core/main/assets/alloy.jpg", + html_favicon_url = "https://raw.githubusercontent.com/alloy-rs/core/main/assets/favicon.ico" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(any(test, feature = "std")), no_std)] + +mod receipt; +pub use receipt::{ScrollTransactionReceipt, ScrollTransactionReceiptFields}; + +mod transaction; +pub use transaction::{ScrollL1MessageTransactionFields, ScrollTransactionRequest, Transaction};
diff --git reth/crates/scroll/alloy/rpc-types/src/receipt.rs scroll-reth/crates/scroll/alloy/rpc-types/src/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..6a0a04c660252c0180e7940015f49d6e7d2b0c3c --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/src/receipt.rs @@ -0,0 +1,176 @@ +//! Receipt types for RPC + +use alloy_consensus::{Receipt, ReceiptWithBloom}; +use alloy_serde::OtherFields; +use serde::{Deserialize, Serialize}; + +use scroll_alloy_consensus::ScrollReceiptEnvelope; + +/// Scroll Transaction Receipt type +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[doc(alias = "ScrollTxReceipt")] +pub struct ScrollTransactionReceipt { + /// Regular eth transaction receipt including deposit receipts + #[serde(flatten)] + pub inner: + alloy_rpc_types_eth::TransactionReceipt<ScrollReceiptEnvelope<alloy_rpc_types_eth::Log>>, + /// L1 fee for the transaction. + #[serde(default, skip_serializing_if = "Option::is_none", with = "alloy_serde::quantity::opt")] + pub l1_fee: Option<u128>, +} + +impl alloy_network_primitives::ReceiptResponse for ScrollTransactionReceipt { + fn contract_address(&self) -> Option<alloy_primitives::Address> { + self.inner.contract_address + } + + fn status(&self) -> bool { + self.inner.inner.status() + } + + fn block_hash(&self) -> Option<alloy_primitives::BlockHash> { + self.inner.block_hash + } + + fn block_number(&self) -> Option<u64> { + self.inner.block_number + } + + fn transaction_hash(&self) -> alloy_primitives::TxHash { + self.inner.transaction_hash + } + + fn transaction_index(&self) -> Option<u64> { + self.inner.transaction_index() + } + + fn gas_used(&self) -> u64 { + self.inner.gas_used() + } + + fn effective_gas_price(&self) -> u128 { + self.inner.effective_gas_price() + } + + fn blob_gas_used(&self) -> Option<u64> { + self.inner.blob_gas_used() + } + + fn blob_gas_price(&self) -> Option<u128> { + self.inner.blob_gas_price() + } + + fn from(&self) -> alloy_primitives::Address { + self.inner.from() + } + + fn to(&self) -> Option<alloy_primitives::Address> { + self.inner.to() + } + + fn cumulative_gas_used(&self) -> u64 { + self.inner.cumulative_gas_used() + } + + fn state_root(&self) -> Option<alloy_primitives::B256> { + self.inner.state_root() + } +} + +/// Additional fields for Scroll transaction receipts: <https://github.com/scroll-tech/go-ethereum/blob/develop/core/types/receipt.go#L78> +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[doc(alias = "ScrollTxReceiptFields")] +pub struct ScrollTransactionReceiptFields { + /// L1 fee for the transaction. + #[serde(default, skip_serializing_if = "Option::is_none", with = "alloy_serde::quantity::opt")] + pub l1_fee: Option<u128>, +} + +impl From<ScrollTransactionReceiptFields> for OtherFields { + fn from(value: ScrollTransactionReceiptFields) -> Self { + serde_json::to_value(value).unwrap().try_into().unwrap() + } +} + +impl From<ScrollTransactionReceipt> for ScrollReceiptEnvelope<alloy_primitives::Log> { + fn from(value: ScrollTransactionReceipt) -> Self { + let inner_envelope = value.inner.inner; + + /// Helper function to convert the inner logs within a [`ReceiptWithBloom`] from RPC to + /// consensus types. 
+ #[inline(always)] + fn convert_standard_receipt( + receipt: ReceiptWithBloom<Receipt<alloy_rpc_types_eth::Log>>, + ) -> ReceiptWithBloom<Receipt<alloy_primitives::Log>> { + let ReceiptWithBloom { logs_bloom, receipt } = receipt; + + let consensus_logs = receipt.logs.into_iter().map(|log| log.inner).collect(); + ReceiptWithBloom { + receipt: Receipt { + status: receipt.status, + cumulative_gas_used: receipt.cumulative_gas_used, + logs: consensus_logs, + }, + logs_bloom, + } + } + + match inner_envelope { + ScrollReceiptEnvelope::Legacy(receipt) => { + Self::Legacy(convert_standard_receipt(receipt)) + } + ScrollReceiptEnvelope::Eip2930(receipt) => { + Self::Eip2930(convert_standard_receipt(receipt)) + } + ScrollReceiptEnvelope::Eip1559(receipt) => { + Self::Eip1559(convert_standard_receipt(receipt)) + } + ScrollReceiptEnvelope::L1Message(receipt) => { + Self::L1Message(convert_standard_receipt(receipt)) + } + _ => unreachable!("Unsupported ScrollReceiptEnvelope variant"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + // <https://github.com/alloy-rs/op-alloy/issues/18> + #[test] + fn parse_rpc_receipt() { + let s = r#"{ + "blockHash": "0x9e6a0fb7e22159d943d760608cc36a0fb596d1ab3c997146f5b7c55c8c718c67", + "blockNumber": "0x6cfef89", + "contractAddress": null, + "cumulativeGasUsed": "0xfa0d", + "effectiveGasPrice": "0x0", + "from": "0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001", + "gasUsed": "0xfa0d", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x4200000000000000000000000000000000000015", + "transactionHash": "0xb7c74afdeb7c89fb9de2c312f49b38cb7a850ba36e064734c5223a477e83fdc9", + "transactionIndex": "0x0", + "type": "0x7e" + }"#; + + let receipt: ScrollTransactionReceipt = serde_json::from_str(s).unwrap(); + let value = serde_json::to_value(&receipt).unwrap(); + let expected_value = serde_json::from_str::<serde_json::Value>(s).unwrap(); + assert_eq!(value, expected_value); + } + + #[test] + fn serialize_empty_scroll_transaction_receipt_fields_struct() { + let scroll_fields = ScrollTransactionReceiptFields::default(); + + let json = serde_json::to_value(scroll_fields).unwrap(); + assert_eq!(json, json!({})); + } +}
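A short sketch of the receipt extension from a consumer's point of view: the Scroll-specific `l1Fee` field is encoded as a hex quantity and omitted entirely when unset. This assumes the crate is consumed as `scroll_alloy_rpc_types` with `serde_json` available; the values are illustrative.

```rust
use scroll_alloy_rpc_types::ScrollTransactionReceiptFields;

fn main() {
    // `l1Fee` is serialized with `alloy_serde::quantity::opt`, i.e. as "0x..".
    let fields = ScrollTransactionReceiptFields { l1_fee: Some(0x2af8) };
    assert_eq!(
        serde_json::to_value(&fields).unwrap(),
        serde_json::json!({ "l1Fee": "0x2af8" })
    );

    // When unset, the field is skipped, mirroring the
    // `serialize_empty_scroll_transaction_receipt_fields_struct` test above.
    let empty = ScrollTransactionReceiptFields::default();
    assert_eq!(serde_json::to_value(&empty).unwrap(), serde_json::json!({}));
}
```

On `ScrollTransactionReceipt` itself the same `l1Fee` key sits alongside the flattened standard receipt fields, so RPC consumers see it as an ordinary top-level receipt key.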
diff --git reth/crates/scroll/alloy/rpc-types/src/transaction.rs scroll-reth/crates/scroll/alloy/rpc-types/src/transaction.rs new file mode 100644 index 0000000000000000000000000000000000000000..1aef1bf0591e0b9105a32cae197f7cc8086b27d9 --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/src/transaction.rs @@ -0,0 +1,317 @@ +//! Scroll specific types related to transactions. + +use alloy_consensus::{transaction::Recovered, Transaction as _, Typed2718}; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use alloy_primitives::{Address, BlockHash, Bytes, ChainId, TxKind, B256, U256}; +use alloy_serde::OtherFields; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; +use serde::{Deserialize, Serialize}; + +mod request; +pub use request::ScrollTransactionRequest; + +/// Scroll Transaction type +#[derive( + Clone, Debug, PartialEq, Eq, Serialize, Deserialize, derive_more::Deref, derive_more::DerefMut, +)] +#[serde(try_from = "tx_serde::TransactionSerdeHelper", into = "tx_serde::TransactionSerdeHelper")] +#[cfg_attr(all(any(test, feature = "arbitrary"), feature = "k256"), derive(arbitrary::Arbitrary))] +pub struct Transaction { + /// Ethereum Transaction Types + #[deref] + #[deref_mut] + pub inner: alloy_rpc_types_eth::Transaction<ScrollTxEnvelope>, +} + +impl Transaction { + /// Returns a rpc [`Transaction`] with a [`ScrollTransactionInfo`] and + /// [`Recovered<ScrollTxEnvelope>`] as input. + pub fn from_transaction( + tx: Recovered<ScrollTxEnvelope>, + tx_info: ScrollTransactionInfo, + ) -> Self { + let base_fee = tx_info.inner.base_fee; + let effective_gas_price = if tx.is_l1_message() { + // For l1 messages, we set the `gasPrice` field to 0 in rpc + 0 + } else { + // TODO: should we get the pool base fee in the case where the transaction is a pending + // transaction here? 
+ base_fee + .map(|base_fee| { + tx.effective_tip_per_gas(base_fee).unwrap_or_default() + base_fee as u128 + }) + .unwrap_or_else(|| tx.max_fee_per_gas()) + }; + + Self { + inner: alloy_rpc_types_eth::Transaction { + inner: tx, + block_hash: tx_info.inner.block_hash, + block_number: tx_info.inner.block_number, + transaction_index: tx_info.inner.index, + effective_gas_price: Some(effective_gas_price), + }, + } + } +} + +impl Typed2718 for Transaction { + fn ty(&self) -> u8 { + self.inner.ty() + } +} + +impl alloy_consensus::Transaction for Transaction { + fn chain_id(&self) -> Option<ChainId> { + self.inner.chain_id() + } + + fn nonce(&self) -> u64 { + self.inner.nonce() + } + + fn gas_limit(&self) -> u64 { + self.inner.gas_limit() + } + + fn gas_price(&self) -> Option<u128> { + self.inner.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.inner.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + self.inner.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + self.inner.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.inner.priority_fee_or_price() + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + self.inner.effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.inner.is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.inner.kind() + } + + fn is_create(&self) -> bool { + self.inner.is_create() + } + + fn to(&self) -> Option<Address> { + self.inner.to() + } + + fn value(&self) -> U256 { + self.inner.value() + } + + fn input(&self) -> &Bytes { + self.inner.input() + } + + fn access_list(&self) -> Option<&AccessList> { + self.inner.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.inner.blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.inner.authorization_list() + } +} + +impl alloy_network_primitives::TransactionResponse for Transaction { + fn tx_hash(&self) -> alloy_primitives::TxHash { + self.inner.tx_hash() + } + + fn block_hash(&self) -> Option<BlockHash> { + self.inner.block_hash() + } + + fn block_number(&self) -> Option<u64> { + self.inner.block_number() + } + + fn transaction_index(&self) -> Option<u64> { + self.inner.transaction_index() + } + + fn from(&self) -> Address { + self.inner.from() + } +} + +/// Scroll specific transaction fields +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ScrollL1MessageTransactionFields { + /// The index of the transaction in the message queue. + pub queue_index: u64, + /// The sender of the transaction on the L1. + pub sender: Address, +} + +impl From<ScrollL1MessageTransactionFields> for OtherFields { + fn from(value: ScrollL1MessageTransactionFields) -> Self { + serde_json::to_value(value).unwrap().try_into().unwrap() + } +} + +impl AsRef<ScrollTxEnvelope> for Transaction { + fn as_ref(&self) -> &ScrollTxEnvelope { + self.inner.as_ref() + } +} + +mod tx_serde { + //! Helper module for serializing and deserializing Scroll [`Transaction`]. + //! + //! This is needed because we might need to deserialize the `from` field into both + //! [`alloy_rpc_types_eth::Transaction::from`] and + //! [`scroll_alloy_consensus::TxL1Message`]. + //! + //! 
Additionally, we need similar logic for the `gasPrice` field + + use super::*; + use alloy_consensus::transaction::Recovered; + use serde::de::Error; + + /// Helper struct which will be flattened into the transaction and will only contain `from` + /// field if inner [`ScrollTxEnvelope`] did not consume it. + #[derive(Serialize, Deserialize)] + struct OptionalFields { + #[serde(default, skip_serializing_if = "Option::is_none")] + from: Option<Address>, + #[serde( + default, + rename = "gasPrice", + skip_serializing_if = "Option::is_none", + with = "alloy_serde::quantity::opt" + )] + effective_gas_price: Option<u128>, + } + + #[derive(Serialize, Deserialize)] + #[serde(rename_all = "camelCase")] + pub(crate) struct TransactionSerdeHelper { + #[serde(flatten)] + inner: ScrollTxEnvelope, + #[serde(default)] + block_hash: Option<BlockHash>, + #[serde(default, with = "alloy_serde::quantity::opt")] + block_number: Option<u64>, + #[serde(default, with = "alloy_serde::quantity::opt")] + transaction_index: Option<u64>, + #[serde(flatten)] + other: OptionalFields, + } + + impl From<Transaction> for TransactionSerdeHelper { + fn from(value: Transaction) -> Self { + let Transaction { + inner: + alloy_rpc_types_eth::Transaction { + inner: recovered, + block_hash, + block_number, + transaction_index, + effective_gas_price, + }, + .. + } = value; + + // if inner transaction has its own `gasPrice` don't serialize it in this struct. + let effective_gas_price = + effective_gas_price.filter(|_| recovered.gas_price().is_none()); + let (inner, from) = recovered.into_parts(); + + Self { + inner, + block_hash, + block_number, + transaction_index, + other: OptionalFields { from: Some(from), effective_gas_price }, + } + } + } + + impl TryFrom<TransactionSerdeHelper> for Transaction { + type Error = serde_json::Error; + + fn try_from(value: TransactionSerdeHelper) -> Result<Self, Self::Error> { + let TransactionSerdeHelper { + inner, + block_hash, + block_number, + transaction_index, + other, + } = value; + + // Try to get `from` field from inner envelope or from `MaybeFrom`, otherwise return + // error + let from = if let Some(from) = other.from { + from + } else if let ScrollTxEnvelope::L1Message(tx) = &inner { + tx.sender + } else { + return Err(serde_json::Error::custom("missing `from` field")); + }; + + let effective_gas_price = other.effective_gas_price.or_else(|| inner.gas_price()); + let recovered = Recovered::new_unchecked(inner, from); + + Ok(Self { + inner: alloy_rpc_types_eth::Transaction { + inner: recovered, + block_hash, + block_number, + transaction_index, + effective_gas_price, + }, + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::address; + + #[test] + fn can_deserialize_l1_messages() { + // cast rpc eth_getTransactionByHash + // 0x5c1c3785c8bf5d7f1cb714abd1d22e32642887215602c3a14a5e9ee105bad6aa --rpc-url https://rpc.scroll.io + let rpc_tx = 
r#"{"blockHash":"0x018ed80ea8340984a1f4841490284d6e51d71f9e9411feeca41e007a89fbfdff","blockNumber":"0xb81121","from":"0x7885bcbd5cecef1336b5300fb5186a12ddd8c478","gas":"0x1e8480","gasPrice":"0x0","hash":"0x5c1c3785c8bf5d7f1cb714abd1d22e32642887215602c3a14a5e9ee105bad6aa","input":"0x8ef1332e000000000000000000000000c186fa914353c44b2e33ebe05f21846f1048beda0000000000000000000000003bad7ad0728f9917d1bf08af5782dcbd516cdd96000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e7ba000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000044493a4f846ffc1507cbfe98a2b0ba1f06ea7e4eb749c001f78f6cb5540daa556a0566322a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","to":"0x781e90f1c8fc4611c9b7497c3b47f99ef6969cbc","transactionIndex":"0x0","value":"0x0","type":"0x7e","v":"0x0","r":"0x0","s":"0x0","sender":"0x7885bcbd5cecef1336b5300fb5186a12ddd8c478","queueIndex":"0xe7ba0", "yParity":"0x0"}"#; + + let tx = serde_json::from_str::<Transaction>(rpc_tx).unwrap(); + + let ScrollTxEnvelope::L1Message(inner) = tx.as_ref() else { + panic!("Expected deposit transaction"); + }; + assert_eq!(inner.sender, address!("7885bcbd5cecef1336b5300fb5186a12ddd8c478")); + assert_eq!(inner.queue_index, 0xe7ba0); + assert_eq!(tx.inner.effective_gas_price, Some(0)); + + let deserialized = serde_json::to_value(&tx).unwrap(); + let expected = serde_json::from_str::<serde_json::Value>(rpc_tx).unwrap(); + similar_asserts::assert_eq!(deserialized, expected); + } +}
diff --git reth/crates/scroll/alloy/rpc-types/src/transaction/request.rs scroll-reth/crates/scroll/alloy/rpc-types/src/transaction/request.rs new file mode 100644 index 0000000000000000000000000000000000000000..b8e1b9cc060e316485f247c996c5383b304e1561 --- /dev/null +++ scroll-reth/crates/scroll/alloy/rpc-types/src/transaction/request.rs @@ -0,0 +1,184 @@ +use alloy_consensus::{ + Sealed, SignableTransaction, Signed, TxEip1559, TxEip4844, TypedTransaction, +}; +use alloy_primitives::{Address, Signature, TxKind, U256}; +use alloy_rpc_types_eth::{AccessList, TransactionInput, TransactionRequest}; +use serde::{Deserialize, Serialize}; + +use scroll_alloy_consensus::{ScrollTxEnvelope, ScrollTypedTransaction, TxL1Message}; + +/// `ScrollTransactionRequest` is a wrapper around the `TransactionRequest` struct. +/// This struct derives several traits to facilitate easier use and manipulation +/// in the codebase. +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Hash, + derive_more::From, + derive_more::AsRef, + derive_more::AsMut, + Serialize, + Deserialize, +)] +#[serde(transparent)] +pub struct ScrollTransactionRequest(TransactionRequest); + +impl ScrollTransactionRequest { + /// Sets the from field in the call to the provided address + #[inline] + pub const fn from(mut self, from: Address) -> Self { + self.0.from = Some(from); + self + } + + /// Sets the transactions type for the transactions. + #[doc(alias = "tx_type")] + pub const fn transaction_type(mut self, transaction_type: u8) -> Self { + self.0.transaction_type = Some(transaction_type); + self + } + + /// Sets the gas limit for the transaction. + pub const fn gas_limit(mut self, gas_limit: u64) -> Self { + self.0.gas = Some(gas_limit); + self + } + + /// Sets the nonce for the transaction. + pub const fn nonce(mut self, nonce: u64) -> Self { + self.0.nonce = Some(nonce); + self + } + + /// Sets the maximum fee per gas for the transaction. + pub const fn max_fee_per_gas(mut self, max_fee_per_gas: u128) -> Self { + self.0.max_fee_per_gas = Some(max_fee_per_gas); + self + } + + /// Sets the maximum priority fee per gas for the transaction. + pub const fn max_priority_fee_per_gas(mut self, max_priority_fee_per_gas: u128) -> Self { + self.0.max_priority_fee_per_gas = Some(max_priority_fee_per_gas); + self + } + + /// Sets the recipient address for the transaction. + #[inline] + pub const fn to(mut self, to: Address) -> Self { + self.0.to = Some(TxKind::Call(to)); + self + } + + /// Sets the value (amount) for the transaction. + pub const fn value(mut self, value: U256) -> Self { + self.0.value = Some(value); + self + } + + /// Sets the access list for the transaction. + pub fn access_list(mut self, access_list: AccessList) -> Self { + self.0.access_list = Some(access_list); + self + } + + /// Sets the input data for the transaction. + pub fn input(mut self, input: TransactionInput) -> Self { + self.0.input = input; + self + } + + /// Builds [`ScrollTypedTransaction`] from this builder. See + /// [`TransactionRequest::build_typed_tx`] for more info. + /// + /// Note that EIP-4844 transactions are not supported by Scroll and will be converted into + /// EIP-1559 transactions. 
+ pub fn build_typed_tx(self) -> Result<ScrollTypedTransaction, Self> { + let tx = self.0.build_typed_tx().map_err(Self)?; + match tx { + TypedTransaction::Legacy(tx) => Ok(ScrollTypedTransaction::Legacy(tx)), + TypedTransaction::Eip1559(tx) => Ok(ScrollTypedTransaction::Eip1559(tx)), + TypedTransaction::Eip2930(tx) => Ok(ScrollTypedTransaction::Eip2930(tx)), + TypedTransaction::Eip4844(tx) => { + let tx: TxEip4844 = tx.into(); + Ok(ScrollTypedTransaction::Eip1559(TxEip1559 { + chain_id: tx.chain_id, + nonce: tx.nonce, + gas_limit: tx.gas_limit, + max_priority_fee_per_gas: tx.max_priority_fee_per_gas, + max_fee_per_gas: tx.max_fee_per_gas, + to: TxKind::Call(tx.to), + value: tx.value, + access_list: tx.access_list, + input: tx.input, + })) + } + TypedTransaction::Eip7702(_) => { + unimplemented!("EIP-7702 support is not implemented yet") + } + } + } +} + +impl From<TxL1Message> for ScrollTransactionRequest { + fn from(tx_l1_message: TxL1Message) -> Self { + let to = TxKind::from(tx_l1_message.to); + Self(TransactionRequest { + from: Some(tx_l1_message.sender), + to: Some(to), + value: Some(tx_l1_message.value), + gas: Some(tx_l1_message.gas_limit), + input: tx_l1_message.input.into(), + ..Default::default() + }) + } +} + +impl From<Sealed<TxL1Message>> for ScrollTransactionRequest { + fn from(value: Sealed<TxL1Message>) -> Self { + value.into_inner().into() + } +} + +impl<T> From<Signed<T, Signature>> for ScrollTransactionRequest +where + T: SignableTransaction<Signature> + Into<TransactionRequest>, +{ + fn from(value: Signed<T, Signature>) -> Self { + #[cfg(feature = "k256")] + let from = value.recover_signer().ok(); + #[cfg(not(feature = "k256"))] + let from = None; + + let mut inner: TransactionRequest = value.strip_signature().into(); + inner.from = from; + + Self(inner) + } +} + +impl From<ScrollTypedTransaction> for ScrollTransactionRequest { + fn from(tx: ScrollTypedTransaction) -> Self { + match tx { + ScrollTypedTransaction::Legacy(tx) => Self(tx.into()), + ScrollTypedTransaction::Eip2930(tx) => Self(tx.into()), + ScrollTypedTransaction::Eip1559(tx) => Self(tx.into()), + ScrollTypedTransaction::Eip7702(tx) => Self(tx.into()), + ScrollTypedTransaction::L1Message(tx) => tx.into(), + } + } +} + +impl From<ScrollTxEnvelope> for ScrollTransactionRequest { + fn from(value: ScrollTxEnvelope) -> Self { + match value { + ScrollTxEnvelope::Eip2930(tx) => tx.into(), + ScrollTxEnvelope::Eip1559(tx) => tx.into(), + ScrollTxEnvelope::Eip7702(tx) => tx.into(), + ScrollTxEnvelope::L1Message(tx) => tx.into(), + _ => Default::default(), + } + } +}
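A minimal usage sketch of the builder API added above, turning a request into a `ScrollTypedTransaction`. The concrete field values are illustrative, and whether `build_typed_tx` succeeds depends on the requirements of the underlying `TransactionRequest::build_typed_tx`:

```rust
use alloy_primitives::{address, U256};
use scroll_alloy_consensus::ScrollTypedTransaction;
use scroll_alloy_rpc_types::ScrollTransactionRequest;

fn build_request() -> Result<ScrollTypedTransaction, ScrollTransactionRequest> {
    ScrollTransactionRequest::default()
        .from(address!("f39Fd6e51aad88F6F4ce6aB8827279cffFb92266"))
        .to(address!("70997970C51812dc3A010C7d01b50e0d17dc79C8"))
        .value(U256::from(1u64))
        .nonce(0)
        .gas_limit(21_000)
        .max_fee_per_gas(1_000_000_000)
        .max_priority_fee_per_gas(0)
        // Type 2 (EIP-1559); an EIP-4844 request would be downgraded to
        // EIP-1559 by `build_typed_tx`, since Scroll has no blob transactions.
        .transaction_type(2)
        .build_typed_tx()
}
```

Note that EIP-7702 requests currently hit the `unimplemented!` branch in `build_typed_tx`, so callers should not route them through this path yet.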
diff --git reth/crates/scroll/bin/scroll-reth/Cargo.toml scroll-reth/crates/scroll/bin/scroll-reth/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..41fe2849bf4b844fa6352fe8e61e61390f8bb2ea --- /dev/null +++ scroll-reth/crates/scroll/bin/scroll-reth/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "scroll-reth" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-cli-util.workspace = true + +# scroll +reth-scroll-cli.workspace = true +reth-scroll-node.workspace = true + +# misc +clap = { workspace = true, features = ["derive", "env"] } +tracing.workspace = true + +[features] +dev = ["reth-scroll-cli/dev"] +js-tracer = ["reth-scroll-node/js-tracer"] + +[[bin]] +name = "scroll-reth" +path = "src/main.rs"
diff --git reth/crates/scroll/bin/scroll-reth/src/main.rs scroll-reth/crates/scroll/bin/scroll-reth/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..d65e33aabd403044396274a1a549ee6d7bbd2337 --- /dev/null +++ scroll-reth/crates/scroll/bin/scroll-reth/src/main.rs @@ -0,0 +1,32 @@ +//! Scroll binary + +#[global_allocator] +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); + +fn main() { + use clap::Parser; + use reth_scroll_cli::{Cli, ScrollChainSpecParser}; + use reth_scroll_node::{ScrollNode, ScrollRollupArgs}; + use tracing::info; + + reth_cli_util::sigsegv_handler::install(); + + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + unsafe { + std::env::set_var("RUST_BACKTRACE", "1"); + } + } + + if let Err(err) = + Cli::<ScrollChainSpecParser, ScrollRollupArgs>::parse().run(|builder, args| async move { + info!(target: "reth::cli", "Launching node"); + let handle = + builder.node(ScrollNode::new(args)).launch_with_debug_capabilities().await?; + handle.node_exit_future.await + }) + { + eprintln!("Error: {err:?}"); + std::process::exit(1); + } +}
diff --git reth/crates/scroll/chainspec/Cargo.toml scroll-reth/crates/scroll/chainspec/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..60298bef3e021cd099922ddb579313d11f8c99e0 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "reth-scroll-chainspec" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "EVM chain spec implementation for scroll." + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec = { workspace = true, default-features = false } +reth-ethereum-forks = { workspace = true, default-features = false } +reth-network-peers = { workspace = true, default-features = false } +reth-primitives-traits = { workspace = true, default-features = false } +reth-trie-common = { workspace = true, default-features = false } + +# scroll +reth-scroll-forks = { workspace = true, default-features = false } +scroll-alloy-hardforks = { workspace = true, default-features = false } + +# ethereum +alloy-chains = { workspace = true, default-features = false } +alloy-genesis = { workspace = true, default-features = false } +alloy-primitives = { workspace = true, default-features = false } +alloy-consensus = { workspace = true, default-features = false } +alloy-eips = { workspace = true, default-features = false } +alloy-serde = { workspace = true, default-features = false } + +# io +serde_json = { workspace = true, default-features = false } +serde = { workspace = true, default-features = false, features = ["derive"] } + +# misc +auto_impl.workspace = true +derive_more = { workspace = true, default-features = false } +once_cell = { workspace = true, default-features = false } + +[dev-dependencies] +alloy-genesis.workspace = true +reth-chainspec = { workspace = true, features = ["test-utils"] } + +[features] +default = ["std"] +std = [ + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-eips/std", + "alloy-serde/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-scroll-forks/std", + "reth-trie-common/std", + "alloy-consensus/std", + "once_cell/std", + "serde/std", + "derive_more/std", + "reth-network-peers/std", + "serde_json/std", + "scroll-alloy-hardforks/std", +]
diff --git reth/crates/scroll/chainspec/res/genesis/dev.json scroll-reth/crates/scroll/chainspec/res/genesis/dev.json new file mode 100644 index 0000000000000000000000000000000000000000..9297f45b248308e80abd86287e5990ee227db4d5 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/res/genesis/dev.json @@ -0,0 +1,93 @@ +{ + "nonce": "0x0", + "timestamp": "0x6490fdd2", + "extraData": "0x", + "gasLimit": "0x1312D00", + "baseFeePerGas": "0x0", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x976EA74026E726554dB657fA54763abd0C3a0aa9": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xBcd4042DE499D14e55001CcbB24a551F3b954096": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x71bE63f3384f5fb98995898A86B02Fb2426c5788": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xcd3B766CCDd6AE721141F452C550Ca635964ce71": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x2546BcD3c84621e976D8185a91A922aE77ECEc30": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdD2FD4581271e230360230F9337D5c0430Bf44C0": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x5300000000000000000000000000000000000002": { + "balance": "0xd3c21bcecceda1000000", + "code": 
"0x608060405234801561000f575f80fd5b50600436106101c6575f3560e01c8063715018a6116100fe578063bede39b51161009e578063e88a60ad1161006e578063e88a60ad1461035d578063f2fde38b14610370578063f45e65d814610383578063fe5b04151461038c575f80fd5b8063bede39b51461031c578063c63b9e2d1461032f578063c91e514914610342578063de26c4a11461034a575f80fd5b80638da5cb5b116100d95780638da5cb5b146102c457806393e59dc1146102ee578063944b247f14610301578063a911d77f14610314575f80fd5b8063715018a6146102ab5780637f977cbf146102b357806384189161146102bb575f80fd5b80633d0f963e116101695780635471db39116101445780635471db391461027d5780636112d6db146102865780636a5e67e51461028f5780637046559714610298575f80fd5b80633d0f963e1461024e57806349948e0e14610261578063519b4bd314610274575f80fd5b806323e524ac116101a457806323e524ac146102105780633577afc51461021957806339455d3a1461022e5780633b7656bb14610241575f80fd5b80630c18c162146101ca5780630f337f6d146101e657806313dad5be14610203575b5f80fd5b6101d360025481565b6040519081526020015b60405180910390f35b600c546101f39060ff1681565b60405190151581526020016101dd565b6008546101f39060ff1681565b6101d360065481565b61022c610227366004610ccf565b610394565b005b61022c61023c366004610ce6565b610426565b600b546101f39060ff1681565b61022c61025c366004610d06565b610523565b6101d361026f366004610d47565b6105a6565b6101d360015481565b6101d360095481565b6101d3600a5481565b6101d360075481565b61022c6102a6366004610ccf565b6105f3565b61022c610681565b61022c6106b5565b6101d360055481565b5f546102d6906001600160a01b031681565b6040516001600160a01b0390911681526020016101dd565b6004546102d6906001600160a01b031681565b61022c61030f366004610ccf565b610711565b61022c61079d565b61022c61032a366004610ccf565b6107f9565b61022c61033d366004610ccf565b6108b6565b6009546101d3565b6101d3610358366004610d47565b610933565b61022c61036b366004610ccf565b61096a565b61022c61037e366004610d06565b6109f6565b6101d360035481565b61022c610a81565b5f546001600160a01b031633146103c65760405162461bcd60e51b81526004016103bd90610df2565b60405180910390fd5b621c9c388111156103ea57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561046d573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104919190610e29565b6104ae576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a16040518181527f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b0316331461054c5760405162461bcd60e51b81526004016103bd90610df2565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f79101610517565b600c545f9060ff16156105c2576105bc82610add565b92915050565b600b5460ff16156105d6576105bc82610b55565b60085460ff16156105ea576105bc82610bb3565b6105bc82610bef565b5f546001600160a01b0316331461061c5760405162461bcd60e51b81526004016103bd90610df2565b61062c633b9aca006103e8610e5c565b81111561064c57604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a9060200161041b565b5f546001600160a01b031633146106aa5760405162461bcd60e51b81526004016103bd90610df2565b6106b35f610c20565b565b5f546001600160a01b031633146106de5760405162461bcd60e51b81526004016103bd90610df2565b600c5460ff161561070257604051631823
89a760e01b815260040160405180910390fd5b600c805460ff19166001179055565b5f546001600160a01b0316331461073a5760405162461bcd60e51b81526004016103bd90610df2565b610748633b9aca0080610e5c565b8111156107685760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a1089060200161041b565b5f546001600160a01b031633146107c65760405162461bcd60e51b81526004016103bd90610df2565b60085460ff16156107ea576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610840573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906108649190610e29565b610881576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200161041b565b5f546001600160a01b031633146108df5760405162461bcd60e51b81526004016103bd90610df2565b805f036108fe5760405162ae184360e01b815260040160405180910390fd5b600a8190556040518181527f8647cebb7e57360673a28415c0bed2f68c42a86c5035f1c9b2eda2b09509288a9060200161041b565b600c545f9060ff16806109485750600b5460ff165b80610955575060085460ff165b1561096157505f919050565b6105bc82610c6f565b5f546001600160a01b031633146109935760405162461bcd60e51b81526004016103bd90610df2565b6109a1633b9aca0080610e5c565b8111156109c15760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa9060200161041b565b5f546001600160a01b03163314610a1f5760405162461bcd60e51b81526004016103bd90610df2565b6001600160a01b038116610a755760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016103bd565b610a7e81610c20565b50565b5f546001600160a01b03163314610aaa5760405162461bcd60e51b81526004016103bd90610df2565b600b5460ff1615610ace57604051631a7c228b60e21b815260040160405180910390fd5b600b805460ff19166001179055565b5f808251600554600754610af19190610e5c565b600154600654610b019190610e5c565b610b0b9190610e73565b610b159190610e5c565b90505f600a54845183610b289190610e5c565b610b329190610e86565b9050633b9aca00610b438284610e73565b610b4d9190610e86565b949350505050565b5f633b9aca0080600a548451600554600754610b719190610e5c565b600154600654610b819190610e5c565b610b8b9190610e73565b610b959190610e5c565b610b9f9190610e5c565b610ba99190610e86565b6105bc9190610e86565b5f633b9aca006005548351600754610bcb9190610e5c565b610bd59190610e5c565b600154600654610be59190610e5c565b610ba99190610e73565b5f80610bfa83610c6f565b90505f60015482610c0b9190610e5c565b9050633b9aca0060035482610b439190610e5c565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b81811015610cc057848181518110610c9157610c91610ea5565b01602001516001600160f81b0319165f03610cb157600483019250610cb8565b6010830192505b600101610c77565b50506002540160400192915050565b5f60208284031215610cdf575f80fd5b5035919050565b5f8060408385031215610cf7575f80fd5b50508035926020909101359150565b5f60208284031215610d16575f80fd5b81356001600160a01b0381168114610d2c575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610d57575f80fd5b813567ffffffffffffffff80821115610d6e575f80fd5b818401915084601f830112610d81575f80fd5b813581811115610d9357610d93610d33565b604051601f8201601f19908116603f01168101908382118183101715610dbb57610dbb610d33565b81604052828152876020848701011115610dd3575f80fd5b826020860160208301375f92810
1602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610e39575f80fd5b81518015158114610d2c575f80fd5b634e487b7160e01b5f52601160045260245ffd5b80820281158282048414176105bc576105bc610e48565b808201808211156105bc576105bc610e48565b5f82610ea057634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a", + "storage": { + "0x01": "0x000000000000000000000000000000000000000000000000000000003758e6b0", + "0x02": "0x0000000000000000000000000000000000000000000000000000000000000038", + "0x03": "0x000000000000000000000000000000000000000000000000000000003e95ba80", + "0x04": "0x0000000000000000000000005300000000000000000000000000000000000003", + "0x05": "0x000000000000000000000000000000000000000000000000000000008390c2c1", + "0x06": "0x00000000000000000000000000000000000000000000000000000069cf265bfe", + "0x07": "0x00000000000000000000000000000000000000000000000000000000168b9aa3", + "0x08": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x09": "0x000000000000000000000000000000000000000000000000000000003b9aca00", + "0x0a": "0x000000000000000000000000000000000000000000000000000000003b9aca00", + "0x0b": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0c": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +}
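The dev genesis above is plain JSON, so its shape can be sanity-checked without any chain-spec machinery. A small illustrative sketch (the test location and the `include_str!` path are assumptions):

```rust
#[test]
fn dev_genesis_shape() {
    // 20 pre-funded dev accounts plus one funded predeploy with code/storage
    // at 0x5300000000000000000000000000000000000002.
    let raw = include_str!("../res/genesis/dev.json");
    let genesis: serde_json::Value = serde_json::from_str(raw).unwrap();

    assert_eq!(genesis["gasLimit"], "0x1312D00");
    let alloc = genesis["alloc"].as_object().unwrap();
    assert_eq!(alloc.len(), 21);
    assert!(alloc["0x5300000000000000000000000000000000000002"]["code"].is_string());
}
```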
diff --git reth/crates/scroll/chainspec/res/genesis/scroll.json scroll-reth/crates/scroll/chainspec/res/genesis/scroll.json new file mode 100644 index 0000000000000000000000000000000000000000..ef6360ac9d90367c5029a8f6858c062a88866cef --- /dev/null +++ scroll-reth/crates/scroll/chainspec/res/genesis/scroll.json @@ -0,0 +1 @@ +{"config":{"chainId":534352,"homesteadBlock":0,"eip150Block":0,"eip150Hash":"0x0000000000000000000000000000000000000000000000000000000000000000","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"archimedesBlock":0,"shanghaiTime":0,"bernoulliBlock":5220340,"curieBlock":7096836,"darwinTime":1724227200,"darwinV2Time":1725264000,"clique":{"period":3,"epoch":30000,"relaxed_period":true},"scroll":{"useZktrie":true,"maxTxPerBlock":100,"maxTxPayloadBytesPerBlock":122880,"feeVaultAddress":"0x5300000000000000000000000000000000000005","l1Config":{"l1ChainId":"1","l1MessageQueueAddress":"0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B","scrollChainAddress":"0xa13BAF47339d63B743e7Da8741db5456DAc1E556","numL1MessagesPerBlock":"10"}}},"nonce":"0x0","timestamp":"0x6524e860","extraData":"0x4c61206573746f6e7465636f206573746173206d616c6665726d6974612e0000d2ACF5d16a983DB0d909d9D761B8337Fabd6cBd10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","gasLimit":"10000000","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0xF9062b8a30e0d7722960e305049FA50b86ba6253":{"balance":"2000000000000000000"},"0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC":{"balance":"226156424291633194186662080095093570025917938800079226637565593765455331328"},"0x5300000000000000000000000000000000000000":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100935760003560e01c806383cc76601161006657806383cc7660146100fc5780638da5cb5b1461010f578063c4d66de814610122578063d4b9f4fa14610135578063f2fde38b1461013e57600080fd5b806326aad7b7146100985780633cb747bf146100b4578063600a2e77146100df578063715018a6146100f2575b600080fd5b6100a160015481565b6040519081526020015b60405180910390f35b6053546100c7906001600160a01b031681565b6040516001600160a01b0390911681526020016100ab565b6100a16100ed36600461054a565b610151565b6100fa6101f6565b005b6100a161010a36600461054a565b61022c565b6052546100c7906001600160a01b031681565b6100fa610130366004610563565b610243565b6100a160005481565b6100fa61014c366004610563565b6102db565b6053546000906001600160a01b031633146101a45760405162461bcd60e51b815260206004820152600e60248201526d37b7363c9036b2b9b9b2b733b2b960911b60448201526064015b60405180910390fd5b6000806101b084610367565b60408051838152602081018890529294509092507ffaa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693910160405180910390a19392505050565b6052546001600160a01b031633146102205760405162461bcd60e51b815260040161019b90610593565b61022a6000610486565b565b602a816028811061023c57600080fd5b0154905081565b6052546001600160a01b0316331461026d5760405162461bcd60e51b815260040161019b90610593565b600154156102b15760405162461bcd60e51b815260206004820152601160248201527063616e6e6f7420696e697469616c697a6560781b604482015260640161019b565b6102b96104d8565b605380546001600160a01b0319166001600160a01b0392909216919091179055565b6052546001600160a01b031633146103055760405162461bcd60e51b815260040161019b90610593565b6001600160a01b03811661035b5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e65
7220697320746865207a65726f2061646472657373000000604482015260640161019b565b61036481610486565b50565b60035460009081906103bb5760405162461bcd60e51b815260206004820152601a60248201527f63616c6c206265666f726520696e697469616c697a6174696f6e000000000000604482015260640161019b565b6001548360005b8215610456576103d36002846105e0565b60000361041f5781602a82602881106103ee576103ee6105ca565b01556104188260028360288110610407576104076105ca565b015460009182526020526040902090565b915061044a565b610447602a8260288110610435576104356105ca565b01548360009182526020526040902090565b91505b600192831c92016103c2565b81602a826028811061046a5761046a6105ca565b0155506000819055600180548082019091559590945092505050565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b60005b60286104e8826001610618565b10156103645761051960028260288110610504576105046105ca565b015460028360288110610407576104076105ca565b6002610526836001610618565b60288110610536576105366105ca565b01558061054281610631565b9150506104db565b60006020828403121561055c57600080fd5b5035919050565b60006020828403121561057557600080fd5b81356001600160a01b038116811461058c57600080fd5b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b600052603260045260246000fd5b6000826105fd57634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052601160045260246000fd5b8082018082111561062b5761062b610602565b92915050565b60006001820161064357610643610602565b506001019056fea26469706673582212208fb1cb9933bb17dd0a7c17de7c890919b08d2fd7eb2bede7b41caa32709b30b564736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000052":"0xF9062b8a30e0d7722960e305049FA50b86ba6253"}},"0x5300000000000000000000000000000000000002":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100cf5760003560e01c8063715018a61161008c578063bede39b511610066578063bede39b51461018d578063de26c4a1146101a0578063f2fde38b146101b3578063f45e65d8146101c657600080fd5b8063715018a6146101475780638da5cb5b1461014f57806393e59dc11461017a57600080fd5b80630c18c162146100d45780633577afc5146100f05780633d0f963e1461010557806349948e0e14610118578063519b4bd31461012b5780637046559714610134575b600080fd5b6100dd60025481565b6040519081526020015b60405180910390f35b6101036100fe366004610671565b6101cf565b005b61010361011336600461068a565b610291565b6100dd6101263660046106d0565b61031c565b6100dd60015481565b610103610142366004610671565b610361565b610103610416565b600054610162906001600160a01b031681565b6040516001600160a01b0390911681526020016100e7565b600454610162906001600160a01b031681565b61010361019b366004610671565b61044c565b6100dd6101ae3660046106d0565b610533565b6101036101c136600461068a565b610595565b6100dd60035481565b6000546001600160a01b031633146102025760405162461bcd60e51b81526004016101f990610781565b60405180910390fd5b621c9c388111156102555760405162461bcd60e51b815260206004820152601760248201527f657863656564206d6178696d756d206f7665726865616400000000000000000060448201526064016101f9565b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6000546001600160a01b031633146102bb5760405162461bcd60e51b81526004016101f990610781565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f7910160405180910390a15050565b60008061032883610533565b905060006001548261033a91906107b8565b9050633b9aca006003
548261034f91906107b8565b61035991906107e5565b949350505050565b6000546001600160a01b0316331461038b5760405162461bcd60e51b81526004016101f990610781565b61039b633b9aca006103e86107b8565b8111156103e15760405162461bcd60e51b8152602060048201526014602482015273657863656564206d6178696d756d207363616c6560601b60448201526064016101f9565b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a90602001610286565b6000546001600160a01b031633146104405760405162461bcd60e51b81526004016101f990610781565b61044a6000610621565b565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610495573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104b99190610807565b6104fe5760405162461bcd60e51b81526020600482015260166024820152752737ba103bb434ba32b634b9ba32b21039b2b73232b960511b60448201526064016101f9565b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c4490602001610286565b80516000908190815b818110156105865784818151811061055657610556610829565b01602001516001600160f81b0319166000036105775760048301925061057e565b6010830192505b60010161053c565b50506002540160400192915050565b6000546001600160a01b031633146105bf5760405162461bcd60e51b81526004016101f990610781565b6001600160a01b0381166106155760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016101f9565b61061e81610621565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561068357600080fd5b5035919050565b60006020828403121561069c57600080fd5b81356001600160a01b03811681146106b357600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000602082840312156106e257600080fd5b813567ffffffffffffffff808211156106fa57600080fd5b818401915084601f83011261070e57600080fd5b813581811115610720576107206106ba565b604051601f8201601f19908116603f01168101908382118183101715610748576107486106ba565b8160405282815287602084870101111561076157600080fd5b826020860160208301376000928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b60008160001904831182151516156107e057634e487b7160e01b600052601160045260246000fd5b500290565b60008261080257634e487b7160e01b600052601260045260246000fd5b500490565b60006020828403121561081957600080fd5b815180151581146106b357600080fd5b634e487b7160e01b600052603260045260246000fdfea26469706673582212205ea335809638809cf032c794fd966e2439020737b1dcc2218435cb438286efcf64736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0xF9062b8a30e0d7722960e305049FA50b86ba6253"}},"0x5300000000000000000000000000000000000003":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100575760003560e01c8063715018a61461005c57806379586dd7146100665780638da5cb5b14610079578063efc78401146100a9578063f2fde38b146100e5575b600080fd5b6100646100f8565b005b610064610074366004610356565b610137565b60005461008c906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b6100d56100b736600461042d565b6001600160a01b031660009081526001602052604090205460ff1690565b60405190151581526020016100a0565b6100646100f336600461042d565b610238565b6000546001600160a01b0316331461012b5760405162461bcd60e51b81526004016101229061044f565b60405180910390fd5b61013560006102c4565b565b6000546001600160a01b031633146101615760405162461bcd60e51b8152600401610122
9061044f565b60005b825181101561023357816001600085848151811061018457610184610486565b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff0219169083151502179055508281815181106101d5576101d5610486565b60200260200101516001600160a01b03167f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d83604051610219911515815260200190565b60405180910390a28061022b8161049c565b915050610164565b505050565b6000546001600160a01b031633146102625760405162461bcd60e51b81526004016101229061044f565b6001600160a01b0381166102b85760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610122565b6102c1816102c4565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b634e487b7160e01b600052604160045260246000fd5b80356001600160a01b038116811461034157600080fd5b919050565b8035801515811461034157600080fd5b6000806040838503121561036957600080fd5b823567ffffffffffffffff8082111561038157600080fd5b818501915085601f83011261039557600080fd5b81356020828211156103a9576103a9610314565b8160051b604051601f19603f830116810181811086821117156103ce576103ce610314565b6040529283528183019350848101820192898411156103ec57600080fd5b948201945b83861015610411576104028661032a565b855294820194938201936103f1565b96506104209050878201610346565b9450505050509250929050565b60006020828403121561043f57600080fd5b6104488261032a565b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b600052603260045260246000fd5b6000600182016104bc57634e487b7160e01b600052601160045260246000fd5b506001019056fea26469706673582212203414b076e92b618bd7c3437159d7bceb2acc3a5c82f51f383465512d9c52e97064736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0xF9062b8a30e0d7722960e305049FA50b86ba6253"}},"0x5300000000000000000000000000000000000004":{"balance":"0x0","code":"0x6080604052600436106101025760003560e01c806370a0823111610095578063a457c2d711610064578063a457c2d7146102b4578063a9059cbb146102d4578063d0e30db0146102f4578063d505accf146102fc578063dd62ed3e1461031c57600080fd5b806370a08231146102215780637ecebe001461025757806384b0196e1461027757806395d89b411461029f57600080fd5b80632e1a7d4d116100d15780632e1a7d4d146101b0578063313ce567146101d05780633644e515146101ec578063395093511461020157600080fd5b806306fdde0314610116578063095ea7b31461014157806318160ddd1461017157806323b872dd1461019057600080fd5b366101115761010f61033c565b005b600080fd5b34801561012257600080fd5b5061012b61038d565b60405161013891906112fa565b60405180910390f35b34801561014d57600080fd5b5061016161015c366004611330565b61041f565b6040519015158152602001610138565b34801561017d57600080fd5b506002545b604051908152602001610138565b34801561019c57600080fd5b506101616101ab36600461135a565b610439565b3480156101bc57600080fd5b5061010f6101cb366004611396565b61045d565b3480156101dc57600080fd5b5060405160128152602001610138565b3480156101f857600080fd5b5061018261054e565b34801561020d57600080fd5b5061016161021c366004611330565b61055d565b34801561022d57600080fd5b5061018261023c3660046113af565b6001600160a01b031660009081526020819052604090205490565b34801561026357600080fd5b506101826102723660046113af565b61057f565b34801561028357600080fd5b5061028c61059d565b60405161013897969594939291906113ca565b3480156102ab57600080fd5b5061012b610626565b3480156102c057600080fd5b506101616102cf366004611330565b610635565b3480156102e057600080fd5b506101616102ef366004611330565b
6106b0565b61010f61033c565b34801561030857600080fd5b5061010f610317366004611460565b6106be565b34801561032857600080fd5b506101826103373660046114d3565b610822565b336103478134610881565b806001600160a01b03167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c3460405161038291815260200190565b60405180910390a250565b60606003805461039c90611506565b80601f01602080910402602001604051908101604052809291908181526020018280546103c890611506565b80156104155780601f106103ea57610100808354040283529160200191610415565b820191906000526020600020905b8154815290600101906020018083116103f857829003601f168201915b5050505050905090565b60003361042d818585610940565b60019150505b92915050565b600033610447858285610a65565b610452858585610adf565b506001949350505050565b336104688183610c83565b6000816001600160a01b03168360405160006040518083038185875af1925050503d80600081146104b5576040519150601f19603f3d011682016040523d82523d6000602084013e6104ba565b606091505b50509050806105065760405162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc81155120819985a5b1959606a1b60448201526064015b60405180910390fd5b816001600160a01b03167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b658460405161054191815260200190565b60405180910390a2505050565b6000610558610db2565b905090565b60003361042d8185856105708383610822565b61057a919061153a565b610940565b6001600160a01b038116600090815260076020526040812054610433565b6000606080828080836105d17f577261707065642045746865720000000000000000000000000000000000000d6005610edd565b6105fc7f31000000000000000000000000000000000000000000000000000000000000016006610edd565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b60606004805461039c90611506565b600033816106438286610822565b9050838110156106a35760405162461bcd60e51b815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b60648201526084016104fd565b6104528286868403610940565b60003361042d818585610adf565b8342111561070e5760405162461bcd60e51b815260206004820152601d60248201527f45524332305065726d69743a206578706972656420646561646c696e6500000060448201526064016104fd565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c988888861073d8c610f81565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e001604051602081830303815290604052805190602001209050600061079882610fa9565b905060006107a882878787610fd6565b9050896001600160a01b0316816001600160a01b03161461080b5760405162461bcd60e51b815260206004820152601e60248201527f45524332305065726d69743a20696e76616c6964207369676e6174757265000060448201526064016104fd565b6108168a8a8a610940565b50505050505050505050565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b60006020835110156108695761086283610ffe565b9050610433565b8161087484826115bf565b5060ff9050610433565b90565b6001600160a01b0382166108d75760405162461bcd60e51b815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f20616464726573730060448201526064016104fd565b80600260008282546108e9919061153a565b90915550506001600160a01b038216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b6001600160a01b0383166109a25760405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b60648201526084016104fd565b6001600160a01b038216610a035760405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f207468652
07a65726f206164647265604482015261737360f01b60648201526084016104fd565b6001600160a01b0383811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b6000610a718484610822565b90506000198114610ad95781811015610acc5760405162461bcd60e51b815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e636500000060448201526064016104fd565b610ad98484848403610940565b50505050565b6001600160a01b038316610b435760405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b60648201526084016104fd565b6001600160a01b038216610ba55760405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b60648201526084016104fd565b6001600160a01b03831660009081526020819052604090205481811015610c1d5760405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b60648201526084016104fd565b6001600160a01b03848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610ad9565b6001600160a01b038216610ce35760405162461bcd60e51b815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f206164647265736044820152607360f81b60648201526084016104fd565b6001600160a01b03821660009081526020819052604090205481811015610d575760405162461bcd60e51b815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e604482015261636560f01b60648201526084016104fd565b6001600160a01b0383166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9101610a58565b505050565b6000306001600160a01b037f000000000000000000000000530000000000000000000000000000000000000416148015610e0b57507f000000000000000000000000000000000000000000000000000000000008275046145b15610e3557507fe5b117a3cd7ae7ed3508e6e6c5a0794536b2a8dee12533c4d7524eae9c85438f90565b610558604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f00cd3d46df44f2cbb950cf84eb2e92aa2ddd23195b1a009173ea59a063357ed3918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc660608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b606060ff8314610ef0576108628361103c565b818054610efc90611506565b80601f0160208091040260200160405190810160405280929190818152602001828054610f2890611506565b8015610f755780601f10610f4a57610100808354040283529160200191610f75565b820191906000526020600020905b815481529060010190602001808311610f5857829003601f168201915b50505050509050610433565b6001600160a01b03811660009081526007602052604090208054600181018255905b50919050565b6000610433610fb6610db2565b8360405161190160f01b8152600281019290925260228201526042902090565b6000806000610fe78787878761107b565b91509150610ff48161113f565b5095945050505050565b600080829050601f81511115611029578260405163305a27a960e01b81526004016104fd91906112fa565b80516110348261167f565b179392505050565b606060006110498361128c565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6000807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08311156110b25750600090506003611136565b6040805160008082526020820180845289905260ff8816928201929092526060
81018690526080810185905260019060a0016020604051602081039080840390855afa158015611106573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661112f57600060019250925050611136565b9150600090505b94509492505050565b6000816004811115611153576111536116a3565b0361115b5750565b600181600481111561116f5761116f6116a3565b036111bc5760405162461bcd60e51b815260206004820152601860248201527f45434453413a20696e76616c6964207369676e6174757265000000000000000060448201526064016104fd565b60028160048111156111d0576111d06116a3565b0361121d5760405162461bcd60e51b815260206004820152601f60248201527f45434453413a20696e76616c6964207369676e6174757265206c656e6774680060448201526064016104fd565b6003816004811115611231576112316116a3565b036112895760405162461bcd60e51b815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202773272076616c604482015261756560f01b60648201526084016104fd565b50565b600060ff8216601f81111561043357604051632cd44ac360e21b815260040160405180910390fd5b6000815180845260005b818110156112da576020818501810151868301820152016112be565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061130d60208301846112b4565b9392505050565b80356001600160a01b038116811461132b57600080fd5b919050565b6000806040838503121561134357600080fd5b61134c83611314565b946020939093013593505050565b60008060006060848603121561136f57600080fd5b61137884611314565b925061138660208501611314565b9150604084013590509250925092565b6000602082840312156113a857600080fd5b5035919050565b6000602082840312156113c157600080fd5b61130d82611314565b60ff60f81b881681526000602060e0818401526113ea60e084018a6112b4565b83810360408501526113fc818a6112b4565b606085018990526001600160a01b038816608086015260a0850187905284810360c0860152855180825283870192509083019060005b8181101561144e57835183529284019291840191600101611432565b50909c9b505050505050505050505050565b600080600080600080600060e0888a03121561147b57600080fd5b61148488611314565b965061149260208901611314565b95506040880135945060608801359350608088013560ff811681146114b657600080fd5b9699959850939692959460a0840135945060c09093013592915050565b600080604083850312156114e657600080fd5b6114ef83611314565b91506114fd60208401611314565b90509250929050565b600181811c9082168061151a57607f821691505b602082108103610fa357634e487b7160e01b600052602260045260246000fd5b8082018082111561043357634e487b7160e01b600052601160045260246000fd5b634e487b7160e01b600052604160045260246000fd5b601f821115610dad57600081815260208120601f850160051c810160208610156115985750805b601f850160051c820191505b818110156115b7578281556001016115a4565b505050505050565b815167ffffffffffffffff8111156115d9576115d961155b565b6115ed816115e78454611506565b84611571565b602080601f831160018114611622576000841561160a5750858301515b600019600386901b1c1916600185901b1785556115b7565b600085815260208120601f198616915b8281101561165157888601518255948401946001909101908401611632565b508582101561166f5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b80516020808301519190811015610fa35760001960209190910360031b1b16919050565b634e487b7160e01b600052602160045260246000fdfea26469706673582212207f39e33e122e8e2b0381aa6abea46046f56b05ced66c556a06bb1b80be7f55cf64736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000003":"0x577261707065642045746865720000000000000000000000000000000000001a","0x0000000000000000000000000000000000000000000000000000000000000004":"0x5745544800000000000000000000000000000000000000000000000000000008"}},"0x5300000000000000000000000000000000000005":{"balance":"0x0","code":"0x6080604052600436106100ab5760003560e01c806384411d6511610064578063
84411d65146101845780638da5cb5b1461019a5780639e7adc79146101ba578063f2fde38b146101da578063feec756c146101fa578063ff4f35461461021a57600080fd5b80632e1a7d4d146100b75780633cb747bf146100d95780633ccfd60b14610116578063457e1a491461012b57806366d003ac1461014f578063715018a61461016f57600080fd5b366100b257005b600080fd5b3480156100c357600080fd5b506100d76100d2366004610682565b61023a565b005b3480156100e557600080fd5b506002546100f9906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b34801561012257600080fd5b506100d76103ff565b34801561013757600080fd5b5061014160015481565b60405190815260200161010d565b34801561015b57600080fd5b506003546100f9906001600160a01b031681565b34801561017b57600080fd5b506100d761040c565b34801561019057600080fd5b5061014160045481565b3480156101a657600080fd5b506000546100f9906001600160a01b031681565b3480156101c657600080fd5b506100d76101d536600461069b565b610442565b3480156101e657600080fd5b506100d76101f536600461069b565b6104be565b34801561020657600080fd5b506100d761021536600461069b565b610547565b34801561022657600080fd5b506100d7610235366004610682565b6105c3565b6001548110156102ca5760405162461bcd60e51b815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d20776974686472616064820152691dd85b08185b5bdd5b9d60b21b608482015260a4015b60405180910390fd5b478082111561032e5760405162461bcd60e51b815260206004820152602a60248201527f4665655661756c743a20696e73756666696369656e742062616c616e636520746044820152696f20776974686472617760b01b60648201526084016102c1565b6004805483019055600354604080518481526001600160a01b0390921660208301523382820152517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1600254600354604080516020810182526000808252915163b2267a7b60e01b81526001600160a01b039485169463b2267a7b9488946103c99491909216928592906004016106cb565b6000604051808303818588803b1580156103e257600080fd5b505af11580156103f6573d6000803e3d6000fd5b50505050505050565b476104098161023a565b50565b6000546001600160a01b031633146104365760405162461bcd60e51b81526004016102c190610737565b6104406000610632565b565b6000546001600160a01b0316331461046c5760405162461bcd60e51b81526004016102c190610737565b600280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f1c928c417a10a21c3cddad148c5dba5d710e4b1442d6d8a36de345935ad8461290600090a35050565b6000546001600160a01b031633146104e85760405162461bcd60e51b81526004016102c190610737565b6001600160a01b03811661053e5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016102c1565b61040981610632565b6000546001600160a01b031633146105715760405162461bcd60e51b81526004016102c190610737565b600380546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f7e1e96961a397c8aa26162fe259cc837afc95e33aad4945ddc61c18dabb7a6ad90600090a35050565b6000546001600160a01b031633146105ed5760405162461bcd60e51b81526004016102c190610737565b600180549082905560408051828152602081018490527f0d3c80219fe57713b9f9c83d1e51426792d0c14d8e330e65b102571816140965910160405180910390a15050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561069457600080fd5b5035919050565b6000602082840312156106ad57600080fd5b81356001600160a01b03811681146106c457600080fd5b9392505050565b60018060a01b038516815260006020858184015260806040840152845180608085015260005b8181101561070d5786810183015185820160a0015282016106f1565b5060006
0a0828601015260a0601f19601f8301168501019250505082606083015295945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e657200000000000000000060408201526060019056fea2646970667358221220063c6c384f745ebcacfdd13320e5b9a50687aae43ff14566761f56273111b97e64736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0xF9062b8a30e0d7722960e305049FA50b86ba6253","0x0000000000000000000000000000000000000000000000000000000000000001":"0x8ac7230489e80000","0x0000000000000000000000000000000000000000000000000000000000000002":"0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC","0x0000000000000000000000000000000000000000000000000000000000000003":"0x8FA3b4570B4C96f8036C13b64971BA65867eEB48"}}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":null}
diff --git reth/crates/scroll/chainspec/res/genesis/sepolia_scroll.json scroll-reth/crates/scroll/chainspec/res/genesis/sepolia_scroll.json new file mode 100644 index 0000000000000000000000000000000000000000..64987e5c8c5eed66d6f1927b5f8f0b6d31b18de9 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/res/genesis/sepolia_scroll.json @@ -0,0 +1 @@ +{"config":{"chainId":534351,"homesteadBlock":0,"eip150Block":0,"eip150Hash":"0x0000000000000000000000000000000000000000000000000000000000000000","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"berlinBlock":0,"londonBlock":0,"archimedesBlock":0,"shanghaiBlock":0,"bernoulliBlock":3747132,"curieBlock":4740239,"clique":{"period":3,"epoch":30000,"relaxed_period":true},"scroll":{"useZktrie":true,"maxTxPerBlock":100,"maxTxPayloadBytesPerBlock":122880,"feeVaultAddress":"0x5300000000000000000000000000000000000005","enableEIP2718":false,"enableEIP1559":false,"l1Config":{"l1ChainId":"11155111","l1MessageQueueAddress":"0xF0B2293F5D834eAe920c6974D50957A1732de763","scrollChainAddress":"0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0","numL1MessagesPerBlock":"10"}}},"nonce":"0x0","timestamp":"0x64cfd015","extraData":"0x000000000000000000000000000000000000000000000000000000000000000048C3F81f3D998b6652900e1C3183736C238Fe4290000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","gasLimit":"8000000","difficulty":"0x1","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0x18960EEc21b1878C581937a14c5c3C43008F6b6B":{"balance":"10000000000000000000"},"0xBa50f5340FB9F3Bd074bD638c9BE13eCB36E603d":{"balance":"226156424291633194186662080095093570025917938800079226629565593765455331328"},"0x5300000000000000000000000000000000000000":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100935760003560e01c806383cc76601161006657806383cc7660146100fc5780638da5cb5b1461010f578063c4d66de814610122578063d4b9f4fa14610135578063f2fde38b1461013e57600080fd5b806326aad7b7146100985780633cb747bf146100b4578063600a2e77146100df578063715018a6146100f2575b600080fd5b6100a160015481565b6040519081526020015b60405180910390f35b6053546100c7906001600160a01b031681565b6040516001600160a01b0390911681526020016100ab565b6100a16100ed36600461054a565b610151565b6100fa6101f6565b005b6100a161010a36600461054a565b61022c565b6052546100c7906001600160a01b031681565b6100fa610130366004610563565b610243565b6100a160005481565b6100fa61014c366004610563565b6102db565b6053546000906001600160a01b031633146101a45760405162461bcd60e51b815260206004820152600e60248201526d37b7363c9036b2b9b9b2b733b2b960911b60448201526064015b60405180910390fd5b6000806101b084610367565b60408051838152602081018890529294509092507ffaa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693910160405180910390a19392505050565b6052546001600160a01b031633146102205760405162461bcd60e51b815260040161019b90610593565b61022a6000610486565b565b602a816028811061023c57600080fd5b0154905081565b6052546001600160a01b0316331461026d5760405162461bcd60e51b815260040161019b90610593565b600154156102b15760405162461bcd60e51b815260206004820152601160248201527063616e6e6f7420696e697469616c697a6560781b604482015260640161019b565b6102b96104d8565b605380546001600160a01b0319166001600160a01b0392909216919091179055565b6052546001600160a01b031633146103055760405162461bcd60e51b815260040161019b90610593565b6001600160a01b03811661035b5760405162461bcd60e51b815260206004820152601d60
248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161019b565b61036481610486565b50565b60035460009081906103bb5760405162461bcd60e51b815260206004820152601a60248201527f63616c6c206265666f726520696e697469616c697a6174696f6e000000000000604482015260640161019b565b6001548360005b8215610456576103d36002846105e0565b60000361041f5781602a82602881106103ee576103ee6105ca565b01556104188260028360288110610407576104076105ca565b015460009182526020526040902090565b915061044a565b610447602a8260288110610435576104356105ca565b01548360009182526020526040902090565b91505b600192831c92016103c2565b81602a826028811061046a5761046a6105ca565b0155506000819055600180548082019091559590945092505050565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b60005b60286104e8826001610618565b10156103645761051960028260288110610504576105046105ca565b015460028360288110610407576104076105ca565b6002610526836001610618565b60288110610536576105366105ca565b01558061054281610631565b9150506104db565b60006020828403121561055c57600080fd5b5035919050565b60006020828403121561057557600080fd5b81356001600160a01b038116811461058c57600080fd5b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b600052603260045260246000fd5b6000826105fd57634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052601160045260246000fd5b8082018082111561062b5761062b610602565b92915050565b60006001820161064357610643610602565b506001019056fea26469706673582212208fb1cb9933bb17dd0a7c17de7c890919b08d2fd7eb2bede7b41caa32709b30b564736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000052":"0x18960EEc21b1878C581937a14c5c3C43008F6b6B"}},"0x5300000000000000000000000000000000000002":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100cf5760003560e01c8063715018a61161008c578063bede39b511610066578063bede39b51461018d578063de26c4a1146101a0578063f2fde38b146101b3578063f45e65d8146101c657600080fd5b8063715018a6146101475780638da5cb5b1461014f57806393e59dc11461017a57600080fd5b80630c18c162146100d45780633577afc5146100f05780633d0f963e1461010557806349948e0e14610118578063519b4bd31461012b5780637046559714610134575b600080fd5b6100dd60025481565b6040519081526020015b60405180910390f35b6101036100fe366004610671565b6101cf565b005b61010361011336600461068a565b610291565b6100dd6101263660046106d0565b61031c565b6100dd60015481565b610103610142366004610671565b610361565b610103610416565b600054610162906001600160a01b031681565b6040516001600160a01b0390911681526020016100e7565b600454610162906001600160a01b031681565b61010361019b366004610671565b61044c565b6100dd6101ae3660046106d0565b610533565b6101036101c136600461068a565b610595565b6100dd60035481565b6000546001600160a01b031633146102025760405162461bcd60e51b81526004016101f990610781565b60405180910390fd5b621c9c388111156102555760405162461bcd60e51b815260206004820152601760248201527f657863656564206d6178696d756d206f7665726865616400000000000000000060448201526064016101f9565b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6000546001600160a01b031633146102bb5760405162461bcd60e51b81526004016101f990610781565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f7910160405180910390a15050565b60008061032883610533565b905060006001548261033a919061
07b8565b9050633b9aca006003548261034f91906107b8565b61035991906107e5565b949350505050565b6000546001600160a01b0316331461038b5760405162461bcd60e51b81526004016101f990610781565b61039b633b9aca006103e86107b8565b8111156103e15760405162461bcd60e51b8152602060048201526014602482015273657863656564206d6178696d756d207363616c6560601b60448201526064016101f9565b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a90602001610286565b6000546001600160a01b031633146104405760405162461bcd60e51b81526004016101f990610781565b61044a6000610621565b565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa158015610495573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104b99190610807565b6104fe5760405162461bcd60e51b81526020600482015260166024820152752737ba103bb434ba32b634b9ba32b21039b2b73232b960511b60448201526064016101f9565b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c4490602001610286565b80516000908190815b818110156105865784818151811061055657610556610829565b01602001516001600160f81b0319166000036105775760048301925061057e565b6010830192505b60010161053c565b50506002540160400192915050565b6000546001600160a01b031633146105bf5760405162461bcd60e51b81526004016101f990610781565b6001600160a01b0381166106155760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016101f9565b61061e81610621565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60006020828403121561068357600080fd5b5035919050565b60006020828403121561069c57600080fd5b81356001600160a01b03811681146106b357600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000602082840312156106e257600080fd5b813567ffffffffffffffff808211156106fa57600080fd5b818401915084601f83011261070e57600080fd5b813581811115610720576107206106ba565b604051601f8201601f19908116603f01168101908382118183101715610748576107486106ba565b8160405282815287602084870101111561076157600080fd5b826020860160208301376000928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b60008160001904831182151516156107e057634e487b7160e01b600052601160045260246000fd5b500290565b60008261080257634e487b7160e01b600052601260045260246000fd5b500490565b60006020828403121561081957600080fd5b815180151581146106b357600080fd5b634e487b7160e01b600052603260045260246000fdfea26469706673582212205ea335809638809cf032c794fd966e2439020737b1dcc2218435cb438286efcf64736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x18960EEc21b1878C581937a14c5c3C43008F6b6B"}},"0x5300000000000000000000000000000000000003":{"balance":"0x0","code":"0x608060405234801561001057600080fd5b50600436106100575760003560e01c8063715018a61461005c57806379586dd7146100665780638da5cb5b14610079578063efc78401146100a9578063f2fde38b146100e5575b600080fd5b6100646100f8565b005b610064610074366004610356565b610137565b60005461008c906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b6100d56100b736600461042d565b6001600160a01b031660009081526001602052604090205460ff1690565b60405190151581526020016100a0565b6100646100f336600461042d565b610238565b6000546001600160a01b0316331461012b5760405162461bcd60e51b81526004016101229061044f565b60405180910390fd5b61013560006102c4565b565b6000546001600160a01b03163314610161576040516246
1bcd60e51b81526004016101229061044f565b60005b825181101561023357816001600085848151811061018457610184610486565b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff0219169083151502179055508281815181106101d5576101d5610486565b60200260200101516001600160a01b03167f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d83604051610219911515815260200190565b60405180910390a28061022b8161049c565b915050610164565b505050565b6000546001600160a01b031633146102625760405162461bcd60e51b81526004016101229061044f565b6001600160a01b0381166102b85760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610122565b6102c1816102c4565b50565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b634e487b7160e01b600052604160045260246000fd5b80356001600160a01b038116811461034157600080fd5b919050565b8035801515811461034157600080fd5b6000806040838503121561036957600080fd5b823567ffffffffffffffff8082111561038157600080fd5b818501915085601f83011261039557600080fd5b81356020828211156103a9576103a9610314565b8160051b604051601f19603f830116810181811086821117156103ce576103ce610314565b6040529283528183019350848101820192898411156103ec57600080fd5b948201945b83861015610411576104028661032a565b855294820194938201936103f1565b96506104209050878201610346565b9450505050509250929050565b60006020828403121561043f57600080fd5b6104488261032a565b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b600052603260045260246000fd5b6000600182016104bc57634e487b7160e01b600052601160045260246000fd5b506001019056fea26469706673582212203414b076e92b618bd7c3437159d7bceb2acc3a5c82f51f383465512d9c52e97064736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000000":"0x18960EEc21b1878C581937a14c5c3C43008F6b6B"}},"0x5300000000000000000000000000000000000004":{"balance":"0x0","code":"0x6080604052600436106101025760003560e01c806370a0823111610095578063a457c2d711610064578063a457c2d7146102b4578063a9059cbb146102d4578063d0e30db0146102f4578063d505accf146102fc578063dd62ed3e1461031c57600080fd5b806370a08231146102215780637ecebe001461025757806384b0196e1461027757806395d89b411461029f57600080fd5b80632e1a7d4d116100d15780632e1a7d4d146101b0578063313ce567146101d05780633644e515146101ec578063395093511461020157600080fd5b806306fdde0314610116578063095ea7b31461014157806318160ddd1461017157806323b872dd1461019057600080fd5b366101115761010f61033c565b005b600080fd5b34801561012257600080fd5b5061012b61037d565b60405161013891906112cf565b60405180910390f35b34801561014d57600080fd5b5061016161015c366004611305565b61040f565b6040519015158152602001610138565b34801561017d57600080fd5b506002545b604051908152602001610138565b34801561019c57600080fd5b506101616101ab36600461132f565b610429565b3480156101bc57600080fd5b5061010f6101cb36600461136b565b61044d565b3480156101dc57600080fd5b5060405160128152602001610138565b3480156101f857600080fd5b50610182610523565b34801561020d57600080fd5b5061016161021c366004611305565b610532565b34801561022d57600080fd5b5061018261023c366004611384565b6001600160a01b031660009081526020819052604090205490565b34801561026357600080fd5b50610182610272366004611384565b610554565b34801561028357600080fd5b5061028c610572565b604051610138979695949392919061139f565b3480156102ab57600080fd5b5061012b6105fb565b3480156102c057600080fd5b506101616102cf366004611305565b61060a565b3480156102e057600080fd5b5061
01616102ef366004611305565b610685565b61010f61033c565b34801561030857600080fd5b5061010f610317366004611435565b610693565b34801561032857600080fd5b506101826103373660046114a8565b6107f7565b6103463334610856565b60405134815233907fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9060200160405180910390a2565b60606003805461038c906114db565b80601f01602080910402602001604051908101604052809291908181526020018280546103b8906114db565b80156104055780601f106103da57610100808354040283529160200191610405565b820191906000526020600020905b8154815290600101906020018083116103e857829003601f168201915b5050505050905090565b60003361041d818585610915565b60019150505b92915050565b600033610437858285610a3a565b610442858585610ab4565b506001949350505050565b6104573382610c58565b604051600090339083908381818185875af1925050503d8060008114610499576040519150601f19603f3d011682016040523d82523d6000602084013e61049e565b606091505b50509050806104ea5760405162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc81155120819985a5b1959606a1b60448201526064015b60405180910390fd5b60405182815233907f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b659060200160405180910390a25050565b600061052d610d87565b905090565b60003361041d81858561054583836107f7565b61054f919061150f565b610915565b6001600160a01b038116600090815260076020526040812054610423565b6000606080828080836105a67f577261707065642045746865720000000000000000000000000000000000000d6005610eb2565b6105d17f31000000000000000000000000000000000000000000000000000000000000016006610eb2565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b60606004805461038c906114db565b6000338161061882866107f7565b9050838110156106785760405162461bcd60e51b815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b60648201526084016104e1565b6104428286868403610915565b60003361041d818585610ab4565b834211156106e35760405162461bcd60e51b815260206004820152601d60248201527f45524332305065726d69743a206578706972656420646561646c696e6500000060448201526064016104e1565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886107128c610f56565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e001604051602081830303815290604052805190602001209050600061076d82610f7e565b9050600061077d82878787610fab565b9050896001600160a01b0316816001600160a01b0316146107e05760405162461bcd60e51b815260206004820152601e60248201527f45524332305065726d69743a20696e76616c6964207369676e6174757265000060448201526064016104e1565b6107eb8a8a8a610915565b50505050505050505050565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b600060208351101561083e5761083783610fd3565b9050610423565b816108498482611594565b5060ff9050610423565b90565b6001600160a01b0382166108ac5760405162461bcd60e51b815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f20616464726573730060448201526064016104e1565b80600260008282546108be919061150f565b90915550506001600160a01b038216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b6001600160a01b0383166109775760405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b60648201526084016104e1565b6001600160a01b0382166109d85760405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b60648201526084016
104e1565b6001600160a01b0383811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b6000610a4684846107f7565b90506000198114610aae5781811015610aa15760405162461bcd60e51b815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e636500000060448201526064016104e1565b610aae8484848403610915565b50505050565b6001600160a01b038316610b185760405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b60648201526084016104e1565b6001600160a01b038216610b7a5760405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b60648201526084016104e1565b6001600160a01b03831660009081526020819052604090205481811015610bf25760405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b60648201526084016104e1565b6001600160a01b03848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610aae565b6001600160a01b038216610cb85760405162461bcd60e51b815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f206164647265736044820152607360f81b60648201526084016104e1565b6001600160a01b03821660009081526020819052604090205481811015610d2c5760405162461bcd60e51b815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e604482015261636560f01b60648201526084016104e1565b6001600160a01b0383166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9101610a2d565b505050565b6000306001600160a01b037f0000000000000000000000005fbdb2315678afecb367f032d93f642f64180aa316148015610de057507f000000000000000000000000000000000000000000000000000000000008274f46145b15610e0a57507f624453decb4e78ca99c7630ff9f52222ea6f559f0a6c1bb60b935ef006fa159e90565b61052d604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f00cd3d46df44f2cbb950cf84eb2e92aa2ddd23195b1a009173ea59a063357ed3918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc660608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b606060ff8314610ec55761083783611011565b818054610ed1906114db565b80601f0160208091040260200160405190810160405280929190818152602001828054610efd906114db565b8015610f4a5780601f10610f1f57610100808354040283529160200191610f4a565b820191906000526020600020905b815481529060010190602001808311610f2d57829003601f168201915b50505050509050610423565b6001600160a01b03811660009081526007602052604090208054600181018255905b50919050565b6000610423610f8b610d87565b8360405161190160f01b8152600281019290925260228201526042902090565b6000806000610fbc87878787611050565b91509150610fc981611114565b5095945050505050565b600080829050601f81511115610ffe578260405163305a27a960e01b81526004016104e191906112cf565b805161100982611654565b179392505050565b6060600061101e83611261565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6000807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0831115611087575060009050600361110b565b6040805160008082526020820180845289905260ff881692820192909252606081018690526080810185905260019060a001602060405160208103908084
0390855afa1580156110db573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b0381166111045760006001925092505061110b565b9150600090505b94509492505050565b600081600481111561112857611128611678565b036111305750565b600181600481111561114457611144611678565b036111915760405162461bcd60e51b815260206004820152601860248201527f45434453413a20696e76616c6964207369676e6174757265000000000000000060448201526064016104e1565b60028160048111156111a5576111a5611678565b036111f25760405162461bcd60e51b815260206004820152601f60248201527f45434453413a20696e76616c6964207369676e6174757265206c656e6774680060448201526064016104e1565b600381600481111561120657611206611678565b0361125e5760405162461bcd60e51b815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202773272076616c604482015261756560f01b60648201526084016104e1565b50565b600060ff8216601f81111561042357604051632cd44ac360e21b815260040160405180910390fd5b6000815180845260005b818110156112af57602081850181015186830182015201611293565b506000602082860101526020601f19601f83011685010191505092915050565b6020815260006112e26020830184611289565b9392505050565b80356001600160a01b038116811461130057600080fd5b919050565b6000806040838503121561131857600080fd5b611321836112e9565b946020939093013593505050565b60008060006060848603121561134457600080fd5b61134d846112e9565b925061135b602085016112e9565b9150604084013590509250925092565b60006020828403121561137d57600080fd5b5035919050565b60006020828403121561139657600080fd5b6112e2826112e9565b60ff60f81b881681526000602060e0818401526113bf60e084018a611289565b83810360408501526113d1818a611289565b606085018990526001600160a01b038816608086015260a0850187905284810360c0860152855180825283870192509083019060005b8181101561142357835183529284019291840191600101611407565b50909c9b505050505050505050505050565b600080600080600080600060e0888a03121561145057600080fd5b611459886112e9565b9650611467602089016112e9565b95506040880135945060608801359350608088013560ff8116811461148b57600080fd5b9699959850939692959460a0840135945060c09093013592915050565b600080604083850312156114bb57600080fd5b6114c4836112e9565b91506114d2602084016112e9565b90509250929050565b600181811c908216806114ef57607f821691505b602082108103610f7857634e487b7160e01b600052602260045260246000fd5b8082018082111561042357634e487b7160e01b600052601160045260246000fd5b634e487b7160e01b600052604160045260246000fd5b601f821115610d8257600081815260208120601f850160051c8101602086101561156d5750805b601f850160051c820191505b8181101561158c57828155600101611579565b505050505050565b815167ffffffffffffffff8111156115ae576115ae611530565b6115c2816115bc84546114db565b84611546565b602080601f8311600181146115f757600084156115df5750858301515b600019600386901b1c1916600185901b17855561158c565b600085815260208120601f198616915b8281101561162657888601518255948401946001909101908401611607565b50858210156116445787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b80516020808301519190811015610f785760001960209190910360031b1b16919050565b634e487b7160e01b600052602160045260246000fdfea264697066735822122075458b204a41338df799effa8b73c6c1a17e612bc3b3311c0cec123c4da7709964736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000000000003":"0x577261707065642045746865720000000000000000000000000000000000001a","0x0000000000000000000000000000000000000000000000000000000000000004":"0x5745544800000000000000000000000000000000000000000000000000000008"}},"0x5300000000000000000000000000000000000005":{"balance":"0x0","code":"0x6080604052600436106100a05760003560e01c806384411d651161006457806384411d65146101595780638da5cb5b1461016f5780639e7adc791461018f
578063f2fde38b146101af578063feec756c146101cf578063ff4f3546146101ef57600080fd5b80633cb747bf146100ac5780633ccfd60b146100e9578063457e1a491461010057806366d003ac14610124578063715018a61461014457600080fd5b366100a757005b600080fd5b3480156100b857600080fd5b506002546100cc906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b3480156100f557600080fd5b506100fe61020f565b005b34801561010c57600080fd5b5061011660015481565b6040519081526020016100e0565b34801561013057600080fd5b506003546100cc906001600160a01b031681565b34801561015057600080fd5b506100fe610371565b34801561016557600080fd5b5061011660045481565b34801561017b57600080fd5b506000546100cc906001600160a01b031681565b34801561019b57600080fd5b506100fe6101aa3660046105ea565b6103a7565b3480156101bb57600080fd5b506100fe6101ca3660046105ea565b610423565b3480156101db57600080fd5b506100fe6101ea3660046105ea565b6104af565b3480156101fb57600080fd5b506100fe61020a36600461061a565b61052b565b60015447908110156102a15760405162461bcd60e51b815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d20776974686472616064820152691dd85b08185b5bdd5b9d60b21b608482015260a4015b60405180910390fd5b6004805482019055600354604080518381526001600160a01b0390921660208301523382820152517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1600254600354604080516020810182526000808252915163b2267a7b60e01b81526001600160a01b039485169463b2267a7b94879461033c949190921692859290600401610633565b6000604051808303818588803b15801561035557600080fd5b505af1158015610369573d6000803e3d6000fd5b505050505050565b6000546001600160a01b0316331461039b5760405162461bcd60e51b81526004016102989061069f565b6103a5600061059a565b565b6000546001600160a01b031633146103d15760405162461bcd60e51b81526004016102989061069f565b600280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f1c928c417a10a21c3cddad148c5dba5d710e4b1442d6d8a36de345935ad8461290600090a35050565b6000546001600160a01b0316331461044d5760405162461bcd60e51b81526004016102989061069f565b6001600160a01b0381166104a35760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610298565b6104ac8161059a565b50565b6000546001600160a01b031633146104d95760405162461bcd60e51b81526004016102989061069f565b600380546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f7e1e96961a397c8aa26162fe259cc837afc95e33aad4945ddc61c18dabb7a6ad90600090a35050565b6000546001600160a01b031633146105555760405162461bcd60e51b81526004016102989061069f565b600180549082905560408051828152602081018490527f0d3c80219fe57713b9f9c83d1e51426792d0c14d8e330e65b102571816140965910160405180910390a15050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6000602082840312156105fc57600080fd5b81356001600160a01b038116811461061357600080fd5b9392505050565b60006020828403121561062c57600080fd5b5035919050565b60018060a01b038516815260006020858184015260806040840152845180608085015260005b818110156106755786810183015185820160a001528201610659565b50600060a0828601015260a0601f19601f8301168501019250505082606083015295945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e657200000000000000000060408201526060019056fea26469706673582212200c5bec0af207d4c7845829d5330f295a5f16702ab8bde670ae90be68974af0a764736f6c63430008100033","storage":{"0x0000000000000000000000000000000000000000000000000000000
000000000":"0x18960EEc21b1878C581937a14c5c3C43008F6b6B","0x0000000000000000000000000000000000000000000000000000000000000001":"0x8ac7230489e80000","0x0000000000000000000000000000000000000000000000000000000000000002":"0xBa50f5340FB9F3Bd074bD638c9BE13eCB36E603d","0x0000000000000000000000000000000000000000000000000000000000000003":"0x2351C7aD0c8cFEB25c81301EAC922ab1f1980bbe"}}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeePerGas":null}
diff --git reth/crates/scroll/chainspec/src/constants.rs scroll-reth/crates/scroll/chainspec/src/constants.rs new file mode 100644 index 0000000000000000000000000000000000000000..a95fba0ee468f5e89356087dfe3bbc64292f7bd9 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/constants.rs @@ -0,0 +1,122 @@ +use crate::genesis::L1Config; +use alloy_eips::eip1559::BaseFeeParams; +use alloy_primitives::{address, b256, Address, B256}; + +/// The transaction fee recipient on the L2. +pub const SCROLL_FEE_VAULT_ADDRESS: Address = address!("5300000000000000000000000000000000000005"); + +/// The maximum size in bytes of the payload for a block. +pub const MAX_TX_PAYLOAD_BYTES_PER_BLOCK: usize = 120 * 1024; + +/// The system contract on L2 mainnet. +pub const SCROLL_MAINNET_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS: Address = + address!("331A873a2a85219863d80d248F9e2978fE88D0Ea"); + +/// The L1 message queue address for Scroll mainnet. +/// <https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B>. +pub const SCROLL_MAINNET_L1_MESSAGE_QUEUE_ADDRESS: Address = + address!("0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"); + +/// The L1 message queue v2 address for Scroll mainnet. +/// <https://etherscan.io/address/0x56971da63A3C0205184FEF096E9ddFc7A8C2D18a>. +pub const SCROLL_MAINNET_L1_MESSAGE_QUEUE_V2_ADDRESS: Address = + address!("56971da63A3C0205184FEF096E9ddFc7A8C2D18a"); + +/// The L1 proxy address for Scroll mainnet. +/// <https://etherscan.io/address/0xa13BAF47339d63B743e7Da8741db5456DAc1E556>. +pub const SCROLL_MAINNET_L1_PROXY_ADDRESS: Address = + address!("a13BAF47339d63B743e7Da8741db5456DAc1E556"); + +/// The maximum allowed l1 messages per block for Scroll mainnet. +pub const SCROLL_MAINNET_MAX_L1_MESSAGES: u64 = 10; + +/// The L1 configuration for Scroll mainnet. +pub const SCROLL_MAINNET_L1_CONFIG: L1Config = L1Config { + l1_chain_id: alloy_chains::NamedChain::Mainnet as u64, + l1_message_queue_address: SCROLL_MAINNET_L1_MESSAGE_QUEUE_ADDRESS, + l1_message_queue_v2_address: SCROLL_MAINNET_L1_MESSAGE_QUEUE_V2_ADDRESS, + l2_system_config_address: SCROLL_MAINNET_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, + scroll_chain_address: SCROLL_MAINNET_L1_PROXY_ADDRESS, + num_l1_messages_per_block: SCROLL_MAINNET_MAX_L1_MESSAGES, +}; + +/// The Scroll Mainnet genesis hash +pub const SCROLL_MAINNET_GENESIS_HASH: B256 = + b256!("bbc05efd412b7cd47a2ed0e5ddfcf87af251e414ea4c801d78b6784513180a80"); + +/// The system contract on L2 sepolia. +pub const SCROLL_SEPOLIA_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS: Address = + address!("F444cF06A3E3724e20B35c2989d3942ea8b59124"); + +/// The L1 message queue address for Scroll sepolia. +/// <https://sepolia.etherscan.io/address/0xF0B2293F5D834eAe920c6974D50957A1732de763>. +pub const SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_ADDRESS: Address = + address!("F0B2293F5D834eAe920c6974D50957A1732de763"); + +/// The L1 message queue address v2 for Scroll sepolia. +/// <https://sepolia.etherscan.io/address/0xA0673eC0A48aa924f067F1274EcD281A10c5f19F>. +pub const SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_V2_ADDRESS: Address = + address!("A0673eC0A48aa924f067F1274EcD281A10c5f19F"); + +/// The L1 proxy address for Scroll sepolia. +/// <https://sepolia.etherscan.io/address/0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0> +pub const SCROLL_SEPOLIA_L1_PROXY_ADDRESS: Address = + address!("2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"); + +/// The maximum allowed l1 messages per block for Scroll sepolia. +pub const SCROLL_SEPOLIA_MAX_L1_MESSAGES: u64 = 10; + +/// The L1 configuration for Scroll sepolia. 
+pub const SCROLL_SEPOLIA_L1_CONFIG: L1Config = L1Config { + l1_chain_id: alloy_chains::NamedChain::Sepolia as u64, + l1_message_queue_address: SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_ADDRESS, + l1_message_queue_v2_address: SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_V2_ADDRESS, + l2_system_config_address: SCROLL_SEPOLIA_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, + scroll_chain_address: SCROLL_SEPOLIA_L1_PROXY_ADDRESS, + num_l1_messages_per_block: SCROLL_SEPOLIA_MAX_L1_MESSAGES, +}; + +/// The system contract on devnet. +pub const SCROLL_DEV_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS: Address = + address!("0000000000000000000000000000000000000000"); + +/// The L1 message queue address for Scroll dev. +pub const SCROLL_DEV_L1_MESSAGE_QUEUE_ADDRESS: Address = + address!("0000000000000000000000000000000000000000"); + +/// The L1 message queue v2 address for Scroll dev. +pub const SCROLL_DEV_L1_MESSAGE_QUEUE_V2_ADDRESS: Address = + address!("0000000000000000000000000000000000000000"); + +/// The L1 proxy address for Scroll dev. +pub const SCROLL_DEV_L1_PROXY_ADDRESS: Address = + address!("0000000000000000000000000000000000000000"); + +/// The maximum allowed l1 messages per block for Scroll dev. +pub const SCROLL_DEV_MAX_L1_MESSAGES: u64 = 10; + +/// The L1 configuration for Scroll dev. +pub const SCROLL_DEV_L1_CONFIG: L1Config = L1Config { + l1_chain_id: alloy_chains::NamedChain::Goerli as u64, + l1_message_queue_address: SCROLL_DEV_L1_MESSAGE_QUEUE_ADDRESS, + l1_message_queue_v2_address: SCROLL_DEV_L1_MESSAGE_QUEUE_V2_ADDRESS, + scroll_chain_address: SCROLL_DEV_L1_PROXY_ADDRESS, + l2_system_config_address: SCROLL_DEV_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, + num_l1_messages_per_block: SCROLL_DEV_MAX_L1_MESSAGES, +}; + +/// The Scroll Sepolia genesis hash +pub const SCROLL_SEPOLIA_GENESIS_HASH: B256 = + b256!("aa62d1a8b2bffa9e5d2368b63aae0d98d54928bd713125e3fd9e5c896c68592c"); + +/// The base fee params for Feynman. +pub const SCROLL_BASE_FEE_PARAMS_FEYNMAN: BaseFeeParams = BaseFeeParams::new( + SCROLL_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_FEYNMAN, + SCROLL_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER_FEYNMAN, +); + +/// The scroll EIP1559 max change denominator for Feynman. +pub const SCROLL_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_FEYNMAN: u128 = 8; + +/// The scroll EIP1559 default elasticity multiplier for Feynman. +pub const SCROLL_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER_FEYNMAN: u128 = 2;
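`SCROLL_BASE_FEE_PARAMS_FEYNMAN` packs the two numbers that govern base-fee updates after Feynman: a max-change denominator of 8 and an elasticity multiplier of 2. As a hedged illustration of what these parameters control, here is a hand-rolled version of the standard EIP-1559 update rule; the node itself uses the alloy/reth `BaseFeeParams` machinery rather than this sketch.

```rust
// Illustrative only: the standard EIP-1559 base-fee update, parameterized by the two
// Feynman constants defined above.
const MAX_CHANGE_DENOMINATOR: u128 = 8; // SCROLL_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_FEYNMAN
const ELASTICITY_MULTIPLIER: u128 = 2; // SCROLL_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER_FEYNMAN

fn next_block_base_fee(parent_base_fee: u128, parent_gas_used: u128, parent_gas_limit: u128) -> u128 {
    let gas_target = parent_gas_limit / ELASTICITY_MULTIPLIER;
    if parent_gas_used == gas_target {
        parent_base_fee
    } else if parent_gas_used > gas_target {
        // Full blocks raise the base fee by at most 1/8 per block.
        let delta = (parent_base_fee * (parent_gas_used - gas_target) / gas_target
            / MAX_CHANGE_DENOMINATOR)
            .max(1);
        parent_base_fee + delta
    } else {
        // Under-target blocks lower the base fee by at most 1/8 per block.
        let delta =
            parent_base_fee * (gas_target - parent_gas_used) / gas_target / MAX_CHANGE_DENOMINATOR;
        parent_base_fee.saturating_sub(delta)
    }
}
```

With an elasticity multiplier of 2, the gas target is half the gas limit, so a run of full blocks raises the base fee by up to 12.5% per block.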
diff --git reth/crates/scroll/chainspec/src/dev.rs scroll-reth/crates/scroll/chainspec/src/dev.rs new file mode 100644 index 0000000000000000000000000000000000000000..61bc0e52d62700352ed170fea8767e38209dd33a --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/dev.rs @@ -0,0 +1,41 @@ +//! Chain specification in dev mode for custom chain. + +use crate::{ + constants::SCROLL_BASE_FEE_PARAMS_FEYNMAN, make_genesis_header, LazyLock, ScrollChainConfig, + ScrollChainSpec, +}; +use alloc::{sync::Arc, vec}; + +use alloy_chains::Chain; +use alloy_primitives::U256; +use reth_chainspec::{BaseFeeParamsKind, ChainSpec, Hardfork}; +use reth_primitives_traits::SealedHeader; +use reth_scroll_forks::DEV_HARDFORKS; +use scroll_alloy_hardforks::ScrollHardfork; + +/// Scroll dev testnet specification +/// +/// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test +/// test test test test test test test junk". +pub static SCROLL_DEV: LazyLock<Arc<ScrollChainSpec>> = LazyLock::new(|| { + // In order to have Feynman activated at block 0, we set the `baseFeePerGas` field of the devnet + // genesis to 0. + let genesis = serde_json::from_str(include_str!("../res/genesis/dev.json")) + .expect("Can't deserialize Dev testnet genesis json"); + + ScrollChainSpec { + inner: ChainSpec { + chain: Chain::dev(), + genesis_header: SealedHeader::new_unhashed(make_genesis_header(&genesis)), + genesis, + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks: DEV_HARDFORKS.clone(), + base_fee_params: BaseFeeParamsKind::Variable( + vec![(ScrollHardfork::Feynman.boxed(), SCROLL_BASE_FEE_PARAMS_FEYNMAN)].into(), + ), + ..Default::default() + }, + config: ScrollChainConfig::dev(), + } + .into() +});
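A brief hedged usage sketch for the dev spec, assuming it is written from inside this crate (or against the crate's re-export of `SCROLL_DEV`): the static holds an `Arc<ScrollChainSpec>`, so cloning is cheap, and the Scroll-specific settings sit next to the wrapped reth `ChainSpec`.

```rust
// Sketch only: sanity-check the lazily built dev spec. Field access via `inner` and
// `config` mirrors the struct literal used to build `SCROLL_DEV` above.
use alloy_chains::Chain;

fn dev_spec_sanity_check() {
    let spec = crate::SCROLL_DEV.clone(); // Arc clone, no deep copy of the spec
    assert_eq!(spec.inner.chain, Chain::dev());
    // The dev config reuses the standard Scroll fee vault predeploy address.
    assert!(spec.config.fee_vault_address.is_some());
}
```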
diff --git reth/crates/scroll/chainspec/src/genesis.rs scroll-reth/crates/scroll/chainspec/src/genesis.rs new file mode 100644 index 0000000000000000000000000000000000000000..747e2ddd1b395e44b0b348e95e0454234046de05 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/genesis.rs @@ -0,0 +1,280 @@ +//! Scroll types for genesis data. + +use crate::{ + constants::{ + MAX_TX_PAYLOAD_BYTES_PER_BLOCK, SCROLL_FEE_VAULT_ADDRESS, SCROLL_MAINNET_L1_CONFIG, + SCROLL_SEPOLIA_L1_CONFIG, + }, + SCROLL_DEV_L1_CONFIG, +}; + +use alloy_primitives::Address; +use alloy_serde::OtherFields; +use serde::de::Error; + +/// Container type for all Scroll-specific fields in a genesis file. +/// This struct represents the configuration details and metadata +/// that are specific to the Scroll blockchain, used during the chain's initialization. +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ScrollChainInfo { + /// Information about hard forks specific to the Scroll chain. + /// This optional field contains metadata about various hard fork + /// configurations that are specific to the Scroll blockchain. + pub hard_fork_info: Option<ScrollHardforkInfo>, + /// Scroll chain-specific configuration details. + /// Encapsulates special parameters and settings + /// required for Scroll chain functionality, such as fee-related + /// addresses and Layer 1 configuration. + pub scroll_chain_config: ScrollChainConfig, +} + +impl ScrollChainInfo { + /// Extracts the Scroll specific fields from a genesis file. These fields are expected to be + /// contained in the `genesis.config` under `extra_fields` property. + pub fn extract_from(others: &OtherFields) -> Option<Self> { + Self::try_from(others).ok() + } +} + +impl TryFrom<&OtherFields> for ScrollChainInfo { + type Error = serde_json::Error; + + fn try_from(others: &OtherFields) -> Result<Self, Self::Error> { + let hard_fork_info = ScrollHardforkInfo::try_from(others).ok(); + let scroll_chain_config = ScrollChainConfig::try_from(others)?; + + Ok(Self { hard_fork_info, scroll_chain_config }) + } +} + +/// [`ScrollHardforkInfo`] specifies the block numbers and timestamps at which the Scroll hardforks +/// were activated. +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ScrollHardforkInfo { + /// archimedes block number + pub archimedes_block: Option<u64>, + /// bernoulli block number + pub bernoulli_block: Option<u64>, + /// curie block number + pub curie_block: Option<u64>, + /// darwin hardfork timestamp + pub darwin_time: Option<u64>, + /// darwinV2 hardfork timestamp + pub darwin_v2_time: Option<u64>, + /// euclid hardfork timestamp + pub euclid_time: Option<u64>, + /// euclidV2 hardfork timestamp + pub euclid_v2_time: Option<u64>, + /// feynman hardfork timestamp + pub feynman_time: Option<u64>, + /// galileo hardfork timestamp + pub galileo_time: Option<u64>, + /// galileoV2 hardfork timestamp + pub galileo_v2_time: Option<u64>, +} + +impl ScrollHardforkInfo { + /// Extract the Scroll-specific genesis info from a genesis file. 
+ pub fn extract_from(others: &OtherFields) -> Option<Self> { + Self::try_from(others).ok() + } +} + +impl TryFrom<&OtherFields> for ScrollHardforkInfo { + type Error = serde_json::Error; + + fn try_from(others: &OtherFields) -> Result<Self, Self::Error> { + others.deserialize_as() + } +} + +/// The Scroll l1 config +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct L1Config { + /// l1 chain id + pub l1_chain_id: u64, + /// The L1 contract address of the contract that handles the message queue targeting the Scroll + /// rollup. + pub l1_message_queue_address: Address, + /// The L1 contract address of the contract that handles the message queue v2 targeting the + /// Scroll rollup, used post Euclid fork. + pub l1_message_queue_v2_address: Address, + /// The L1 contract address of the proxy contract which is responsible for Scroll rollup + /// settlement. + pub scroll_chain_address: Address, + /// The address of the L2 system contract. + pub l2_system_config_address: Address, + /// The maximum number of L1 messages to be consumed per L2 rollup block. + pub num_l1_messages_per_block: u64, +} + +/// The configuration for the Scroll sequencer chain. +/// This struct holds the configuration details specific to the Scroll chain, +/// including fee-related addresses and L1 chain-specific settings. +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ScrollChainConfig { + /// The address of the L2 transaction fee vault. + /// This is an optional field that, when set, specifies where L2 transaction fees + /// will be sent or stored. + pub fee_vault_address: Option<Address>, + /// The maximum tx payload size of blocks that we produce. + pub max_tx_payload_bytes_per_block: usize, + /// The L1 configuration. + /// This field encapsulates specific settings and parameters required for L1 + pub l1_config: L1Config, +} + +impl ScrollChainConfig { + /// Extracts the scroll special info by looking for the `scroll` key. It is intended to be + /// parsed from a genesis file. + pub fn extract_from(others: &OtherFields) -> Option<Self> { + Self::try_from(others).ok() + } + + /// Returns the [`ScrollChainConfig`] for Scroll Mainnet. + pub const fn mainnet() -> Self { + Self { + fee_vault_address: Some(SCROLL_FEE_VAULT_ADDRESS), + max_tx_payload_bytes_per_block: MAX_TX_PAYLOAD_BYTES_PER_BLOCK, + l1_config: SCROLL_MAINNET_L1_CONFIG, + } + } + + /// Returns the [`ScrollChainConfig`] for Scroll Sepolia. + pub const fn sepolia() -> Self { + Self { + fee_vault_address: Some(SCROLL_FEE_VAULT_ADDRESS), + max_tx_payload_bytes_per_block: MAX_TX_PAYLOAD_BYTES_PER_BLOCK, + l1_config: SCROLL_SEPOLIA_L1_CONFIG, + } + } + + /// Returns the [`ScrollChainConfig`] for Scroll dev. 
+ pub const fn dev() -> Self { + Self { + fee_vault_address: Some(SCROLL_FEE_VAULT_ADDRESS), + max_tx_payload_bytes_per_block: MAX_TX_PAYLOAD_BYTES_PER_BLOCK, + l1_config: SCROLL_DEV_L1_CONFIG, + } + } +} + +impl TryFrom<&OtherFields> for ScrollChainConfig { + type Error = serde_json::Error; + + fn try_from(others: &OtherFields) -> Result<Self, Self::Error> { + if let Some(Ok(scroll_chain_config)) = others.get_deserialized::<Self>("scroll") { + Ok(scroll_chain_config) + } else { + Err(serde_json::Error::missing_field("scroll")) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::SCROLL_MAINNET_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS; + + use alloy_primitives::address; + + #[test] + fn test_extract_scroll_genesis_info() { + let genesis_info = r#" + { + "archimedesBlock": 0, + "bernoulliBlock": 10, + "curieBlock": 12, + "darwinTime": 0, + "euclidTime": 11, + "feynmanTime": 100, + "galileoTime": 110, + "galileoV2Time": 120 + } + "#; + + let others: OtherFields = serde_json::from_str(genesis_info).unwrap(); + let genesis_info = ScrollHardforkInfo::extract_from(&others).unwrap(); + + assert_eq!( + genesis_info, + ScrollHardforkInfo { + archimedes_block: Some(0), + bernoulli_block: Some(10), + curie_block: Some(12), + darwin_time: Some(0), + darwin_v2_time: None, + euclid_time: Some(11), + euclid_v2_time: None, + feynman_time: Some(100), + galileo_time: Some(110), + galileo_v2_time: Some(120), + } + ); + } + + #[test] + fn test_extract_scroll_chain_info() { + let chain_info_str = r#" + { + "archimedesBlock": 0, + "bernoulliBlock": 10, + "curieBlock": 12, + "darwinTime": 0, + "euclidTime": 11, + "feynmanTime": 100, + "galileoTime": 110, + "galileoV2Time": 120, + "scroll": { + "feeVaultAddress": "0x5300000000000000000000000000000000000005", + "maxTxPayloadBytesPerBlock": 122880, + "l1Config": { + "l1ChainId": 1, + "l1MessageQueueAddress": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B", + "l1MessageQueueV2Address": "0x56971da63A3C0205184FEF096E9ddFc7A8C2D18a", + "l2SystemConfigAddress": "0x331A873a2a85219863d80d248F9e2978fE88D0Ea", + "scrollChainAddress": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556", + "numL1MessagesPerBlock": 10 + } + } + } + "#; + + let others: OtherFields = serde_json::from_str(chain_info_str).unwrap(); + let chain_info = ScrollChainInfo::extract_from(&others).unwrap(); + + let expected = ScrollChainInfo { + hard_fork_info: Some(ScrollHardforkInfo { + archimedes_block: Some(0), + bernoulli_block: Some(10), + curie_block: Some(12), + darwin_time: Some(0), + darwin_v2_time: None, + euclid_time: Some(11), + euclid_v2_time: None, + feynman_time: Some(100), + galileo_time: Some(110), + galileo_v2_time: Some(120), + }), + scroll_chain_config: ScrollChainConfig { + fee_vault_address: Some(address!("5300000000000000000000000000000000000005")), + max_tx_payload_bytes_per_block: MAX_TX_PAYLOAD_BYTES_PER_BLOCK, + l1_config: L1Config { + l1_chain_id: 1, + l1_message_queue_address: address!("0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"), + l1_message_queue_v2_address: address!( + "56971da63A3C0205184FEF096E9ddFc7A8C2D18a" + ), + l2_system_config_address: SCROLL_MAINNET_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, + scroll_chain_address: address!("a13BAF47339d63B743e7Da8741db5456DAc1E556"), + num_l1_messages_per_block: 10, + }, + }, + }; + assert_eq!(chain_info, expected); + } +}
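These extraction helpers are designed to be fed the `extra_fields` of a parsed genesis config; as the doc comments above note, the Scroll fields live under `genesis.config`. A hedged sketch of that flow against `alloy_genesis::Genesis`, assuming a genesis JSON whose `scroll` section matches the typed fields above:

```rust
// Sketch only: parse a genesis JSON string and pull the Scroll-specific chain info out of
// the untyped extra fields on the chain config. Returns None if the `scroll` section is
// absent or does not match the typed layout.
use alloy_genesis::Genesis;

fn scroll_info_from_genesis_json(json: &str) -> Option<crate::ScrollChainInfo> {
    let genesis: Genesis = serde_json::from_str(json).ok()?;
    crate::ScrollChainInfo::extract_from(&genesis.config.extra_fields)
}
```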
diff --git reth/crates/scroll/chainspec/src/lib.rs scroll-reth/crates/scroll/chainspec/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f07275456898988158e707d7c41f2914ce01a9e --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/lib.rs @@ -0,0 +1,916 @@ +//! Scroll-Reth chain specs. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +use alloc::{boxed::Box, vec, vec::Vec}; +use alloy_chains::Chain; +use alloy_consensus::Header; +use alloy_genesis::Genesis; +use alloy_primitives::{B256, U256}; +use derive_more::{Constructor, Deref, From, Into}; +use reth_chainspec::{ + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec, + EthereumCapabilities, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, +}; +use reth_ethereum_forks::{ + ChainHardforks, EthereumHardfork, ForkCondition, ForkFilterKey, ForkHash, Hardfork, +}; +use reth_network_peers::NodeRecord; +use reth_primitives_traits::SealedHeader; +use scroll_alloy_hardforks::{ScrollHardfork, ScrollHardforks}; + +use alloy_eips::eip7840::BlobParams; +#[cfg(not(feature = "std"))] +use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "std")] +use std::sync::LazyLock; + +extern crate alloc; + +mod constants; +pub use constants::{ + MAX_TX_PAYLOAD_BYTES_PER_BLOCK, SCROLL_BASE_FEE_PARAMS_FEYNMAN, SCROLL_DEV_L1_CONFIG, + SCROLL_DEV_L1_MESSAGE_QUEUE_ADDRESS, SCROLL_DEV_L1_MESSAGE_QUEUE_V2_ADDRESS, + SCROLL_DEV_L1_PROXY_ADDRESS, SCROLL_DEV_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, + SCROLL_DEV_MAX_L1_MESSAGES, SCROLL_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_FEYNMAN, + SCROLL_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER_FEYNMAN, SCROLL_FEE_VAULT_ADDRESS, + SCROLL_MAINNET_GENESIS_HASH, SCROLL_MAINNET_L1_CONFIG, SCROLL_MAINNET_L1_MESSAGE_QUEUE_ADDRESS, + SCROLL_MAINNET_L1_MESSAGE_QUEUE_V2_ADDRESS, SCROLL_MAINNET_L1_PROXY_ADDRESS, + SCROLL_MAINNET_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, SCROLL_MAINNET_MAX_L1_MESSAGES, + SCROLL_SEPOLIA_GENESIS_HASH, SCROLL_SEPOLIA_L1_CONFIG, SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_ADDRESS, + SCROLL_SEPOLIA_L1_MESSAGE_QUEUE_V2_ADDRESS, SCROLL_SEPOLIA_L1_PROXY_ADDRESS, + SCROLL_SEPOLIA_L2_SYSTEM_CONFIG_CONTRACT_ADDRESS, SCROLL_SEPOLIA_MAX_L1_MESSAGES, +}; + +mod dev; +pub use dev::SCROLL_DEV; + +mod genesis; +pub use genesis::{ScrollChainConfig, ScrollChainInfo}; + +// convenience re-export of the chain spec provider. +pub use reth_chainspec::ChainSpecProvider; +use reth_scroll_forks::SCROLL_MAINNET_HARDFORKS; + +mod scroll; +pub use scroll::SCROLL_MAINNET; + +mod scroll_sepolia; +pub use scroll_sepolia::SCROLL_SEPOLIA; + +/// Chain spec builder for a Scroll chain. +#[derive(Debug, Default, From)] +pub struct ScrollChainSpecBuilder { + /// [`ChainSpecBuilder`] + inner: ChainSpecBuilder, +} + +impl ScrollChainSpecBuilder { + /// Construct a new builder from the scroll mainnet chain spec. + pub fn scroll_mainnet() -> Self { + Self { + inner: ChainSpecBuilder::default() + .chain(SCROLL_MAINNET.chain) + .genesis(SCROLL_MAINNET.genesis.clone()) + .with_forks(SCROLL_MAINNET.hardforks.clone()), + } + } + + /// Construct a new builder from the scroll sepolia chain spec. 
+ pub fn scroll_sepolia() -> Self { + Self { + inner: ChainSpecBuilder::default() + .chain(SCROLL_SEPOLIA.chain) + .genesis(SCROLL_SEPOLIA.genesis.clone()) + .with_forks(SCROLL_SEPOLIA.hardforks.clone()), + } + } +} + +impl ScrollChainSpecBuilder { + /// Set the chain ID + pub fn chain(mut self, chain: Chain) -> Self { + self.inner = self.inner.chain(chain); + self + } + + /// Set the genesis block. + pub fn genesis(mut self, genesis: Genesis) -> Self { + self.inner = self.inner.genesis(genesis); + self + } + + /// Add the given fork with the given activation condition to the spec. + pub fn with_fork<H: Hardfork>(mut self, fork: H, condition: ForkCondition) -> Self { + self.inner = self.inner.with_fork(fork, condition); + self + } + + /// Add the given forks with the given activation condition to the spec. + pub fn with_forks(mut self, forks: ChainHardforks) -> Self { + self.inner = self.inner.with_forks(forks); + self + } + + /// Remove the given fork from the spec. + pub fn without_fork(mut self, fork: ScrollHardfork) -> Self { + self.inner = self.inner.without_fork(fork); + self + } + + /// Enable Archimedes at genesis + pub fn archimedes_activated(mut self) -> Self { + self.inner = self.inner.london_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Archimedes, ForkCondition::Block(0)); + self + } + + /// Enable Bernoulli at genesis + pub fn bernoulli_activated(mut self) -> Self { + self = self.archimedes_activated(); + self.inner = self.inner.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); + self.inner = self.inner.with_fork(ScrollHardfork::Bernoulli, ForkCondition::Block(0)); + self + } + + /// Enable Curie at genesis + pub fn curie_activated(mut self) -> Self { + self = self.bernoulli_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Curie, ForkCondition::Block(0)); + self + } + + /// Enable Darwin at genesis + pub fn darwin_activated(mut self) -> Self { + self = self.curie_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Darwin, ForkCondition::Timestamp(0)); + self + } + + /// Enable `DarwinV2` at genesis + pub fn darwin_v2_activated(mut self) -> Self { + self = self.darwin_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::DarwinV2, ForkCondition::Timestamp(0)); + self + } + + /// Enable `Euclid` at genesis + pub fn euclid_activated(mut self) -> Self { + self = self.darwin_v2_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Euclid, ForkCondition::Timestamp(0)); + self + } + + /// Enable `EuclidV2` at genesis + pub fn euclid_v2_activated(mut self) -> Self { + self = self.euclid_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::EuclidV2, ForkCondition::Timestamp(0)); + self + } + + /// Enable `Feynman` at genesis + pub fn feynman_activated(mut self) -> Self { + self = self.euclid_v2_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Feynman, ForkCondition::Timestamp(0)); + self + } + + /// Enable `Galileo` at genesis + pub fn galileo_activated(mut self) -> Self { + self = self.feynman_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::Galileo, ForkCondition::Timestamp(0)); + self + } + + /// Enable `GalileoV2` at genesis + pub fn galileo_v2_activated(mut self) -> Self { + self = self.galileo_activated(); + self.inner = self.inner.with_fork(ScrollHardfork::GalileoV2, ForkCondition::Timestamp(0)); + self + } + + /// Build the resulting [`ScrollChainSpec`]. 
+ /// + /// # Panics + /// + /// This function panics if the chain ID and genesis is not set ([`Self::chain`] and + /// [`Self::genesis`]) + pub fn build(self, config: ScrollChainConfig) -> ScrollChainSpec { + ScrollChainSpec { inner: self.inner.build(), config } + } +} + +// Used by the CLI for custom genesis files. +impl ScrollChainSpec { + /// Build from a custom `Genesis`, ensuring: + /// - `genesis_header` has `base_fee_per_gas` (0 if Feynman@genesis) + /// - `base_fee_params` switch to Scroll defaults at Feynman + pub fn from_custom_genesis(genesis: Genesis) -> Self { + // Use the existing From<Genesis> as the base. + let mut spec: Self = genesis.into(); + + // Determine whether Feynman is active at genesis. + let feynman_active_at_genesis = + spec.is_feynman_active_at_timestamp(spec.inner.genesis.timestamp); + + // Ensure the genesis header has a base fee when required. + let mut header = make_genesis_header(&spec.inner.genesis); + if header.base_fee_per_gas.is_none() && feynman_active_at_genesis { + header.base_fee_per_gas = Some(0); + } + spec.inner.genesis_header = SealedHeader::new_unhashed(header); + + // Use Scroll's EIP-1559 params from Feynman onwards. + spec.inner.base_fee_params = BaseFeeParamsKind::Variable( + vec![(ScrollHardfork::Feynman.boxed(), SCROLL_BASE_FEE_PARAMS_FEYNMAN)].into(), + ); + spec + } +} + +/// Returns the chain configuration. +#[auto_impl::auto_impl(Arc)] +pub trait ChainConfig { + /// The configuration. + type Config; + + /// Returns the chain configuration. + fn chain_config(&self) -> &Self::Config; +} + +impl ChainConfig for ScrollChainSpec { + type Config = ScrollChainConfig; + + fn chain_config(&self) -> &Self::Config { + &self.config + } +} + +/// Scroll chain spec type. +#[derive(Debug, Clone, Deref, Into, Constructor, PartialEq, Eq)] +pub struct ScrollChainSpec { + /// [`ChainSpec`]. + #[deref] + pub inner: ChainSpec, + /// [`ScrollChainConfig`] + pub config: ScrollChainConfig, +} + +impl EthChainSpec for ScrollChainSpec { + type Header = Header; + + fn chain(&self) -> alloy_chains::Chain { + self.inner.chain() + } + + fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { + self.inner.base_fee_params_at_timestamp(timestamp) + } + + fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> { + self.inner.blob_params_at_timestamp(timestamp) + } + + fn deposit_contract(&self) -> Option<&DepositContract> { + self.inner.deposit_contract() + } + + fn genesis_hash(&self) -> B256 { + self.inner.genesis_hash() + } + + fn prune_delete_limit(&self) -> usize { + self.inner.prune_delete_limit() + } + + fn display_hardforks(&self) -> Box<dyn alloc::fmt::Display> { + Box::new(ChainSpec::display_hardforks(self)) + } + + fn genesis_header(&self) -> &Header { + self.inner.genesis_header() + } + + fn genesis(&self) -> &Genesis { + self.inner.genesis() + } + + fn bootnodes(&self) -> Option<Vec<NodeRecord>> { + self.inner.bootnodes() + } + + fn final_paris_total_difficulty(&self) -> Option<U256> { + self.inner.final_paris_total_difficulty() + } +} + +impl EthereumCapabilities for ScrollChainSpec { + fn withdrawals_active(&self, _: u64) -> bool { + // Scroll doesn't activate withdrawals. 
+ false + } +} + +fn make_genesis_header(genesis: &Genesis) -> Header { + Header { + gas_limit: genesis.gas_limit, + difficulty: genesis.difficulty, + nonce: genesis.nonce.into(), + extra_data: genesis.extra_data.clone(), + state_root: reth_trie_common::root::state_root_ref_unhashed(&genesis.alloc), + timestamp: genesis.timestamp, + mix_hash: genesis.mix_hash, + beneficiary: genesis.coinbase, + base_fee_per_gas: genesis + .base_fee_per_gas + .map(|b| b.try_into().expect("base fee should fit in u64")), + withdrawals_root: None, + parent_beacon_block_root: None, + blob_gas_used: None, + excess_blob_gas: None, + requests_hash: None, + ..Default::default() + } +} + +impl Hardforks for ScrollChainSpec { + fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition { + self.inner.fork(fork) + } + + fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> { + self.inner.forks_iter() + } + + fn fork_id(&self, head: &Head) -> ForkId { + // TODO: Geth does not support time based hard forks for its `ForkID` calculation. As such, + // we are only using block based hard forks for now. + // self.inner.fork_id(head) + + // The following code is modified version of self.inner.fork_id(head) to ignore time based + // hard forks. + let mut forkhash = ForkHash::from(self.inner.genesis_hash()); + let mut current_applied = 0; + // handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122 + for (_, cond) in self.hardforks.forks_iter() { + // handle block based forks and the sepolia merge netsplit block edge case (TTD + // ForkCondition with Some(block)) + if let ForkCondition::Block(block) | + ForkCondition::TTD { fork_block: Some(block), .. } = cond + { + if head.number >= block { + // skip duplicated hardforks: hardforks enabled at genesis block + if block != current_applied { + forkhash += block; + current_applied = block; + } + } else { + // we can return here because this block fork is not active, so we set the + // `next` value + return ForkId { hash: forkhash, next: block } + } + } + } + ForkId { hash: forkhash, next: 0 } + } + + fn latest_fork_id(&self) -> ForkId { + self.inner.latest_fork_id() + } + + fn fork_filter(&self, head: Head) -> ForkFilter { + let forks = self.inner.hardforks.forks_iter().filter_map(|(_, condition)| { + // We filter out TTD-based forks w/o a pre-known block since those do not show up in the + // fork filter. + Some(match condition { + ForkCondition::Block(block) | + ForkCondition::TTD { fork_block: Some(block), .. 
} => ForkFilterKey::Block(block), + _ => return None, + }) + }); + + ForkFilter::new(head, self.genesis_hash(), self.genesis_timestamp(), forks) + } +} + +impl EthereumHardforks for ScrollChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.fork(fork) + } +} + +impl ScrollHardforks for ScrollChainSpec { + fn scroll_fork_activation(&self, fork: ScrollHardfork) -> ForkCondition { + self.fork(fork) + } +} + +impl From<ChainSpec> for ScrollChainSpec { + fn from(value: ChainSpec) -> Self { + let genesis = value.genesis; + genesis.into() + } +} + +impl From<Genesis> for ScrollChainSpec { + fn from(genesis: Genesis) -> Self { + let scroll_chain_info = ScrollConfigInfo::extract_from(&genesis); + let hard_fork_info = + scroll_chain_info.scroll_chain_info.hard_fork_info.expect("load scroll hard fork info"); + + // Block-based hardforks + let hardfork_opts = [ + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (ScrollHardfork::Archimedes.boxed(), hard_fork_info.archimedes_block), + (ScrollHardfork::Bernoulli.boxed(), hard_fork_info.bernoulli_block), + (ScrollHardfork::Curie.boxed(), hard_fork_info.curie_block), + ]; + let mut block_hardforks = hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::<Vec<_>>(); + + // Time-based hardforks + let time_hardfork_opts = [ + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (ScrollHardfork::Darwin.boxed(), hard_fork_info.darwin_time), + (ScrollHardfork::DarwinV2.boxed(), hard_fork_info.darwin_v2_time), + (ScrollHardfork::Euclid.boxed(), hard_fork_info.euclid_time), + (ScrollHardfork::EuclidV2.boxed(), hard_fork_info.euclid_v2_time), + (ScrollHardfork::Feynman.boxed(), hard_fork_info.feynman_time), + (ScrollHardfork::Galileo.boxed(), hard_fork_info.galileo_time), + (ScrollHardfork::GalileoV2.boxed(), hard_fork_info.galileo_v2_time), + ]; + + let mut time_hardforks = time_hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| { + opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) + }) + .collect::<Vec<_>>(); + + block_hardforks.append(&mut time_hardforks); + + // Ordered Hardforks + let mainnet_hardforks = SCROLL_MAINNET_HARDFORKS.clone(); + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = block_hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(block_hardforks.remove(pos)); + } + } + + // append the remaining unknown hardforks to ensure we don't filter any out + ordered_hardforks.append(&mut block_hardforks); + + Self { + inner: ChainSpec { + chain: genesis.config.chain_id.into(), + genesis, + hardforks: ChainHardforks::new(ordered_hardforks), + ..Default::default() + }, + config: scroll_chain_info.scroll_chain_info.scroll_chain_config, + } + } +} + 
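The `From<Genesis>` conversion above, together with `from_custom_genesis` further up, is what the CLI relies on for custom genesis files: the `scroll` extra fields provide the hardfork schedule, which is then re-ordered against the mainnet fork ordering. A hedged sketch of loading one from disk (the path is a placeholder and `eyre` is assumed as the error crate, matching the rest of the workspace):

```rust
use alloy_genesis::Genesis;
use reth_scroll_chainspec::ScrollChainSpec;
use scroll_alloy_hardforks::ScrollHardforks;

/// Loads a custom genesis file; it must contain the `scroll` extra fields,
/// otherwise the conversion panics.
fn load(path: &str) -> eyre::Result<ScrollChainSpec> {
    let genesis: Genesis = serde_json::from_str(&std::fs::read_to_string(path)?)?;
    // The CLI path: also back-fills the genesis base fee and installs the
    // Feynman EIP-1559 params on top of the plain `From<Genesis>` conversion.
    Ok(ScrollChainSpec::from_custom_genesis(genesis))
}

fn main() -> eyre::Result<()> {
    let spec = load("genesis.json")?;
    println!(
        "euclid v2 active at genesis: {}",
        spec.is_euclid_v2_active_at_timestamp(spec.genesis.timestamp)
    );
    Ok(())
}
```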
+#[derive(Default, Debug)] +struct ScrollConfigInfo { + scroll_chain_info: ScrollChainInfo, +} + +impl ScrollConfigInfo { + fn extract_from(genesis: &Genesis) -> Self { + Self { + scroll_chain_info: ScrollChainInfo::extract_from(&genesis.config.extra_fields) + .expect("extract scroll extra fields failed"), + } + } +} + +#[cfg(test)] +mod tests { + use crate::*; + use alloy_genesis::{ChainConfig, Genesis}; + use alloy_primitives::b256; + use reth_chainspec::{test_fork_ids, ForkFilterKey}; + use reth_ethereum_forks::{EthereumHardfork, ForkHash}; + + #[test] + fn scroll_mainnet_genesis_hash() { + let scroll_mainnet = + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()); + assert_eq!( + b256!("908789cb20d00fc6070093f142aa8d02c21cfb0a9b9cfd4621d8cf0255234c0f"), + scroll_mainnet.genesis_hash() + ); + } + + #[test] + fn scroll_sepolia_genesis_hash() { + let scroll_sepolia = + ScrollChainSpecBuilder::scroll_sepolia().build(ScrollChainConfig::sepolia()); + assert_eq!( + b256!("04414a71425e8ef2632e99a4b148c69d69bab8ffa47ee814231331a33d073df2"), + scroll_sepolia.genesis_hash() + ); + } + + #[test] + fn scroll_mainnet_forkids_deref() { + test_fork_ids( + &SCROLL_MAINNET, + &[ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0xea, 0x6b, 0x56, 0xca]), next: 5220340 }, + ), + // Bernoulli + ( + Head { number: 5220340, ..Default::default() }, + ForkId { hash: ForkHash([0xee, 0x46, 0xae, 0x2a]), next: 7096836 }, + ), + // Curie + ( + Head { number: 7096836, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 1724227200 }, + ), + // Darwin + ( + Head { number: 7096836, timestamp: 1724227200, ..Default::default() }, + ForkId { hash: ForkHash([0xcc, 0xeb, 0x09, 0xb0]), next: 1725264000 }, + ), + // DarwinV2 + ( + Head { number: 7096836, timestamp: 1725264000, ..Default::default() }, + ForkId { hash: ForkHash([0x21, 0xa2, 0x07, 0x54]), next: 1744815600 }, + ), + // Euclid + ( + Head { number: 7096836, timestamp: 1744815600, ..Default::default() }, + ForkId { hash: ForkHash([0xca, 0xc5, 0x80, 0xca]), next: 1745305200 }, + ), + // EuclidV2 + ( + Head { number: 7096836, timestamp: 1745305200, ..Default::default() }, + ForkId { hash: ForkHash([0x0e, 0xcf, 0xb2, 0x31]), next: 1755576000 }, + ), + // Feynman + ( + Head { number: 7096836, timestamp: 1755576000, ..Default::default() }, + ForkId { hash: ForkHash([0x38, 0x0f, 0x78, 0x5d]), next: 1765868400 }, + ), + // Galileo + ( + Head { number: 7096836, timestamp: 1765868400, ..Default::default() }, + ForkId { hash: ForkHash([0x58, 0xdf, 0x9b, 0x21]), next: 1766041200 }, + ), + // GalileoV2 + ( + Head { number: 7096836, timestamp: 1766041200, ..Default::default() }, + ForkId { hash: ForkHash([0xca, 0x90, 0x54, 0xd4]), next: 0 }, + ), + ], + ); + } + + #[test] + fn scroll_mainnet_forkids() { + // To maintain compatibility with l2geth, we need to ignore time-based forks in the ForkID. + + let cases = [ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0xea, 0x6b, 0x56, 0xca]), next: 5220340 }, + ), + // Bernoulli + ( + Head { number: 5220340, ..Default::default() }, + ForkId { hash: ForkHash([0xee, 0x46, 0xae, 0x2a]), next: 7096836 }, + ), + // Curie + ( + Head { number: 7096836, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // Note: The following time-based forks are ignored in the ForkID calculation. 
+ // Darwin + ( + Head { number: 7096836, timestamp: 1724227200, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // DarwinV2 + ( + Head { number: 7096836, timestamp: 1725264000, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // Euclid + ( + Head { number: 7096836, timestamp: 1744815600, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // EuclidV2 + ( + Head { number: 7096836, timestamp: 1745305200, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // Feynman + ( + Head { number: 7096836, timestamp: 1755576000, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // Galileo + ( + Head { number: 7096836, timestamp: 1765868400, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + // GalileoV2 + ( + Head { number: 7096836, timestamp: 1766041200, ..Default::default() }, + ForkId { hash: ForkHash([0x18, 0xd3, 0xc8, 0xd9]), next: 0 }, + ), + ]; + + for (block, expected_id) in cases { + let computed_id = SCROLL_MAINNET.fork_id(&block); + assert_eq!( + expected_id, computed_id, + "Expected fork ID {:?}, computed fork ID {:?} at block {}", + expected_id, computed_id, block.number + ); + } + } + + #[test] + fn scroll_mainnet_fork_filter_excludes_time_based_forks() { + let head = Default::default(); + let fork_filter = SCROLL_MAINNET.fork_filter(head); + + let forks = vec![ + ForkFilterKey::Block(0), + ForkFilterKey::Block(5220340), + ForkFilterKey::Block(7096836), + ]; + let expected_fork_filter = ForkFilter::new( + head, + SCROLL_MAINNET.genesis_hash(), + SCROLL_MAINNET.genesis_timestamp(), + forks, + ); + + assert_eq!(fork_filter, expected_fork_filter); + } + + #[test] + fn scroll_sepolia_forkids() { + test_fork_ids( + &SCROLL_SEPOLIA, + &[ + ( + Head { number: 0, ..Default::default() }, + ForkId { hash: ForkHash([0x25, 0xfa, 0xe4, 0x54]), next: 3747132 }, + ), + // Bernoulli + ( + Head { number: 3747132, ..Default::default() }, + ForkId { hash: ForkHash([0xda, 0x76, 0xc2, 0x2d]), next: 4740239 }, + ), + // Curie + ( + Head { number: 4740239, ..Default::default() }, + ForkId { hash: ForkHash([0x9f, 0xb4, 0x75, 0xf1]), next: 1723622400 }, + ), + // Darwin + ( + Head { number: 4740239, timestamp: 1723622400, ..Default::default() }, + ForkId { hash: ForkHash([0xe9, 0x26, 0xd4, 0x9b]), next: 1724832000 }, + ), + // DarwinV2 + ( + Head { number: 4740239, timestamp: 1724832000, ..Default::default() }, + ForkId { hash: ForkHash([0x69, 0xf3, 0x7e, 0xde]), next: 1741680000 }, + ), + // Euclid + ( + Head { number: 4740239, timestamp: 1741680000, ..Default::default() }, + ForkId { hash: ForkHash([0xf7, 0xac, 0x7e, 0xfc]), next: 1741852800 }, + ), + // EuclidV2 + ( + Head { number: 4740239, timestamp: 1741852800, ..Default::default() }, + ForkId { hash: ForkHash([0x51, 0x7e, 0x0f, 0x1c]), next: 1753167600 }, + ), + // Feynman + ( + Head { number: 4740239, timestamp: 1753167600, ..Default::default() }, + ForkId { hash: ForkHash([0x19, 0xbb, 0x92, 0xc6]), next: 1764054000 }, + ), + // Galileo + ( + Head { number: 4740239, timestamp: 1764054000, ..Default::default() }, + ForkId { hash: ForkHash([0xe8, 0xc2, 0x20, 0x80]), next: 1764831600 }, + ), + // GalileoV2 + ( + Head { number: 4740239, timestamp: 1764831600, ..Default::default() }, + ForkId { hash: ForkHash([0x48, 0xff, 0x11, 0x2e]), next: 0 }, + ), + ], + ); + } + + #[test] + fn 
is_bernoulli_active() { + let scroll_mainnet = + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()); + assert!(!scroll_mainnet.is_bernoulli_active_at_block(1)) + } + + #[test] + fn parse_scroll_hardforks() { + let geth_genesis = r#" + { + "config": { + "bernoulliBlock": 10, + "curieBlock": 20, + "darwinTime": 30, + "darwinV2Time": 31, + "euclidTime": 32, + "euclidV2Time": 33, + "feynmanTime": 34, + "galileoTime": 35, + "galileoV2Time": 36, + "scroll": { + "feeVaultAddress": "0x5300000000000000000000000000000000000005", + "maxTxPayloadBytesPerBlock": 122880, + "l1Config": { + "l1ChainId": 1, + "l1MessageQueueAddress": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B", + "l1MessageQueueV2Address": "0x56971da63A3C0205184FEF096E9ddFc7A8C2D18a", + "l2SystemConfigAddress": "0x331A873a2a85219863d80d248F9e2978fE88D0Ea", + "scrollChainAddress": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556", + "numL1MessagesPerBlock": 10 + } + } + } + }"#; + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + + let actual_bernoulli_block = genesis.config.extra_fields.get("bernoulliBlock"); + assert_eq!(actual_bernoulli_block, Some(serde_json::Value::from(10)).as_ref()); + let actual_curie_block = genesis.config.extra_fields.get("curieBlock"); + assert_eq!(actual_curie_block, Some(serde_json::Value::from(20)).as_ref()); + let actual_darwin_timestamp = genesis.config.extra_fields.get("darwinTime"); + assert_eq!(actual_darwin_timestamp, Some(serde_json::Value::from(30)).as_ref()); + let actual_darwin_v2_timestamp = genesis.config.extra_fields.get("darwinV2Time"); + assert_eq!(actual_darwin_v2_timestamp, Some(serde_json::Value::from(31)).as_ref()); + let actual_euclid_timestamp = genesis.config.extra_fields.get("euclidTime"); + assert_eq!(actual_euclid_timestamp, Some(serde_json::Value::from(32)).as_ref()); + let actual_euclid_v2_timestamp = genesis.config.extra_fields.get("euclidV2Time"); + assert_eq!(actual_euclid_v2_timestamp, Some(serde_json::Value::from(33)).as_ref()); + let actual_feynman_timestamp = genesis.config.extra_fields.get("feynmanTime"); + assert_eq!(actual_feynman_timestamp, Some(serde_json::Value::from(34)).as_ref()); + let actual_galileo_timestamp = genesis.config.extra_fields.get("galileoTime"); + assert_eq!(actual_galileo_timestamp, Some(serde_json::Value::from(35)).as_ref()); + let actual_galileo_v2_timestamp = genesis.config.extra_fields.get("galileoV2Time"); + assert_eq!(actual_galileo_v2_timestamp, Some(serde_json::Value::from(36)).as_ref()); + + let scroll_object = genesis.config.extra_fields.get("scroll").unwrap(); + assert_eq!( + scroll_object, + &serde_json::json!({ + "feeVaultAddress": "0x5300000000000000000000000000000000000005", + "maxTxPayloadBytesPerBlock": 122880, + "l1Config": { + "l1ChainId": 1, + "l1MessageQueueAddress": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B", + "l1MessageQueueV2Address": "0x56971da63A3C0205184FEF096E9ddFc7A8C2D18a", + "l2SystemConfigAddress": "0x331A873a2a85219863d80d248F9e2978fE88D0Ea", + "scrollChainAddress": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556", + "numL1MessagesPerBlock": 10 + } + }) + ); + + let chain_spec: ScrollChainSpec = genesis.into(); + + assert!(!chain_spec.is_fork_active_at_block(ScrollHardfork::Bernoulli, 0)); + assert!(!chain_spec.is_fork_active_at_block(ScrollHardfork::Curie, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Darwin, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::DarwinV2, 0)); + 
assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Euclid, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::EuclidV2, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Feynman, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Galileo, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(ScrollHardfork::GalileoV2, 0)); + + assert!(chain_spec.is_fork_active_at_block(ScrollHardfork::Bernoulli, 10)); + assert!(chain_spec.is_fork_active_at_block(ScrollHardfork::Curie, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Darwin, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::DarwinV2, 31)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Euclid, 32)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::EuclidV2, 33)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Feynman, 34)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::Galileo, 35)); + assert!(chain_spec.is_fork_active_at_timestamp(ScrollHardfork::GalileoV2, 36)); + } + + #[test] + fn test_fork_order_scroll_mainnet() { + let genesis = Genesis { + config: ChainConfig { + chain_id: 0, + homestead_block: Some(0), + dao_fork_block: Some(0), + dao_fork_support: false, + eip150_block: Some(0), + eip155_block: Some(0), + eip158_block: Some(0), + byzantium_block: Some(0), + constantinople_block: Some(0), + petersburg_block: Some(0), + istanbul_block: Some(0), + berlin_block: Some(0), + london_block: Some(0), + shanghai_time: Some(0), + extra_fields: [ + (String::from("archimedesBlock"), 0.into()), + (String::from("bernoulliBlock"), 0.into()), + (String::from("curieBlock"), 0.into()), + (String::from("darwinTime"), 0.into()), + (String::from("darwinV2Time"), 0.into()), + (String::from("feynmanTime"), 0.into()), + (String::from("galileoTime"), 0.into()), + (String::from("galileoV2Time"), 0.into()), + ( + String::from("scroll"), + serde_json::json!({ + "feeVaultAddress": "0x5300000000000000000000000000000000000005", + "maxTxPayloadBytesPerBlock": 122880, + "l1Config": { + "l1ChainId": 1, + "l1MessageQueueAddress": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B", + "l1MessageQueueV2Address": "0x56971da63A3C0205184FEF096E9ddFc7A8C2D18a", + "l2SystemConfigAddress": "0x331A873a2a85219863d80d248F9e2978fE88D0Ea", + "scrollChainAddress": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556", + "numL1MessagesPerBlock": 10 + } + }), + ), + ] + .into_iter() + .collect(), + ..Default::default() + }, + ..Default::default() + }; + + let chain_spec: ScrollChainSpec = genesis.into(); + + let hardforks: Vec<_> = chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); + let expected_hardforks = vec![ + EthereumHardfork::Homestead.boxed(), + EthereumHardfork::Tangerine.boxed(), + EthereumHardfork::SpuriousDragon.boxed(), + EthereumHardfork::Byzantium.boxed(), + EthereumHardfork::Constantinople.boxed(), + EthereumHardfork::Petersburg.boxed(), + EthereumHardfork::Istanbul.boxed(), + EthereumHardfork::Berlin.boxed(), + EthereumHardfork::London.boxed(), + ScrollHardfork::Archimedes.boxed(), + EthereumHardfork::Shanghai.boxed(), + ScrollHardfork::Bernoulli.boxed(), + ScrollHardfork::Curie.boxed(), + ScrollHardfork::Darwin.boxed(), + ScrollHardfork::DarwinV2.boxed(), + ScrollHardfork::Feynman.boxed(), + ScrollHardfork::Galileo.boxed(), + ScrollHardfork::GalileoV2.boxed(), + ]; + + assert!(expected_hardforks + .iter() + .zip(hardforks.iter()) + .all(|(expected, actual)| &**expected == *actual)); + 
+ assert_eq!(expected_hardforks.len(), hardforks.len()); + } +}
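The overridden `fork_id` above hashes only block-based forks so that the `ForkID` stays compatible with l2geth, which does not account for time-based forks; `fork_filter` applies the same restriction. A small sketch of the observable effect, with values taken from the mainnet test table above:

```rust
use reth_chainspec::{Hardforks, Head};
use reth_scroll_chainspec::SCROLL_MAINNET;

fn main() {
    // Same block height, before and after the Darwin activation timestamp.
    let pre_darwin = Head { number: 7_096_836, ..Default::default() };
    let post_darwin = Head { number: 7_096_836, timestamp: 1_724_227_200, ..Default::default() };

    // Time-based forks are ignored, so the ForkID is unchanged across timestamps.
    assert_eq!(SCROLL_MAINNET.fork_id(&pre_darwin), SCROLL_MAINNET.fork_id(&post_darwin));
}
```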
diff --git reth/crates/scroll/chainspec/src/scroll.rs scroll-reth/crates/scroll/chainspec/src/scroll.rs new file mode 100644 index 0000000000000000000000000000000000000000..934fd62c9d1fbb4d8f71ebdafe5faa3a85f9f6f0 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/scroll.rs @@ -0,0 +1,36 @@ +//! Chain specification for the Scroll Mainnet network. + +use crate::{ + constants::SCROLL_BASE_FEE_PARAMS_FEYNMAN, make_genesis_header, LazyLock, ScrollChainConfig, + ScrollChainSpec, SCROLL_MAINNET_GENESIS_HASH, +}; +use alloc::{sync::Arc, vec}; + +use alloy_chains::Chain; +use reth_chainspec::{BaseFeeParamsKind, ChainSpec, Hardfork}; +use reth_primitives_traits::SealedHeader; +use reth_scroll_forks::SCROLL_MAINNET_HARDFORKS; +use scroll_alloy_hardforks::ScrollHardfork; + +/// The Scroll Mainnet spec +pub static SCROLL_MAINNET: LazyLock<Arc<ScrollChainSpec>> = LazyLock::new(|| { + let genesis = serde_json::from_str(include_str!("../res/genesis/scroll.json")) + .expect("Can't deserialize Scroll Mainnet genesis json"); + ScrollChainSpec { + inner: ChainSpec { + chain: Chain::scroll_mainnet(), + genesis_header: SealedHeader::new( + make_genesis_header(&genesis), + SCROLL_MAINNET_GENESIS_HASH, + ), + genesis, + hardforks: SCROLL_MAINNET_HARDFORKS.clone(), + base_fee_params: BaseFeeParamsKind::Variable( + vec![(ScrollHardfork::Feynman.boxed(), SCROLL_BASE_FEE_PARAMS_FEYNMAN)].into(), + ), + ..Default::default() + }, + config: ScrollChainConfig::mainnet(), + } + .into() +});
diff --git reth/crates/scroll/chainspec/src/scroll_sepolia.rs scroll-reth/crates/scroll/chainspec/src/scroll_sepolia.rs new file mode 100644 index 0000000000000000000000000000000000000000..759f1d6c4017bbc650f6b3d9df1bf7a67fbbad89 --- /dev/null +++ scroll-reth/crates/scroll/chainspec/src/scroll_sepolia.rs @@ -0,0 +1,36 @@ +//! Chain specification for the Scroll Sepolia testnet network. + +use crate::{ + constants::SCROLL_BASE_FEE_PARAMS_FEYNMAN, make_genesis_header, LazyLock, ScrollChainConfig, + ScrollChainSpec, SCROLL_SEPOLIA_GENESIS_HASH, +}; +use alloc::{sync::Arc, vec}; + +use alloy_chains::Chain; +use reth_chainspec::{BaseFeeParamsKind, ChainSpec, Hardfork}; +use reth_primitives_traits::SealedHeader; +use reth_scroll_forks::SCROLL_SEPOLIA_HARDFORKS; +use scroll_alloy_hardforks::ScrollHardfork; + +/// The Scroll Sepolia spec +pub static SCROLL_SEPOLIA: LazyLock<Arc<ScrollChainSpec>> = LazyLock::new(|| { + let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia_scroll.json")) + .expect("Can't deserialize Scroll Sepolia genesis json"); + ScrollChainSpec { + inner: ChainSpec { + chain: Chain::scroll_sepolia(), + genesis_header: SealedHeader::new( + make_genesis_header(&genesis), + SCROLL_SEPOLIA_GENESIS_HASH, + ), + genesis, + hardforks: SCROLL_SEPOLIA_HARDFORKS.clone(), + base_fee_params: BaseFeeParamsKind::Variable( + vec![(ScrollHardfork::Feynman.boxed(), SCROLL_BASE_FEE_PARAMS_FEYNMAN)].into(), + ), + ..Default::default() + }, + config: ScrollChainConfig::sepolia(), + } + .into() +});
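Both statics are built on first access from genesis JSON bundled with the crate, and the genesis header is sealed with the known hash rather than re-hashed. A quick sanity-check sketch (assuming `alloy-chains` is also available, as in the workspace):

```rust
use alloy_chains::Chain;
use reth_chainspec::EthChainSpec;
use reth_scroll_chainspec::{
    SCROLL_MAINNET, SCROLL_MAINNET_GENESIS_HASH, SCROLL_SEPOLIA, SCROLL_SEPOLIA_GENESIS_HASH,
};

fn main() {
    // First access deserializes the bundled genesis JSON and seals the header
    // with the pre-computed genesis hash.
    assert_eq!(SCROLL_MAINNET.genesis_hash(), SCROLL_MAINNET_GENESIS_HASH);
    assert_eq!(SCROLL_SEPOLIA.genesis_hash(), SCROLL_SEPOLIA_GENESIS_HASH);
    assert_eq!(SCROLL_MAINNET.chain(), Chain::scroll_mainnet());
}
```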
diff --git reth/crates/scroll/cli/Cargo.toml scroll-reth/crates/scroll/cli/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..d2dd4e51eea33acfcdc952c3f66dc0222a3eca6c --- /dev/null +++ scroll-reth/crates/scroll/cli/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "reth-scroll-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-db = { workspace = true, features = ["scroll-alloy-traits"] } +reth-cli.workspace = true +reth-cli-commands.workspace = true +reth-cli-runner.workspace = true +reth-consensus.workspace = true +reth-node-builder.workspace = true +reth-node-core.workspace = true +reth-node-metrics.workspace = true +reth-tracing.workspace = true + +# scroll +reth-scroll-chainspec.workspace = true +reth-scroll-consensus.workspace = true +reth-scroll-evm.workspace = true +reth-scroll-node.workspace = true +reth-scroll-primitives = { workspace = true, features = ["reth-codec"] } +scroll-alloy-consensus = { workspace = true, optional = true } + +# misc +eyre.workspace = true +clap.workspace = true +proptest = { workspace = true, optional = true } +tracing.workspace = true + +[features] +dev = [ + "dep:proptest", + "dep:scroll-alloy-consensus", + "reth-cli-commands/arbitrary", +]
diff --git reth/crates/scroll/cli/src/app.rs scroll-reth/crates/scroll/cli/src/app.rs new file mode 100644 index 0000000000000000000000000000000000000000..31ae157d8c42a218cba7073a3b2b72356d128013 --- /dev/null +++ scroll-reth/crates/scroll/cli/src/app.rs @@ -0,0 +1,115 @@ +use crate::{Cli, Commands}; +use eyre::{eyre, Result}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::launcher::Launcher; +use reth_cli_runner::CliRunner; +use reth_node_metrics::recorder::install_prometheus_recorder; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_consensus::ScrollBeaconConsensus; +use reth_scroll_evm::ScrollExecutorProvider; +use reth_scroll_node::ScrollNode; +use reth_tracing::{FileWorkerGuard, Layers}; +use std::{fmt, sync::Arc}; +use tracing::info; + +/// A wrapper around a parsed CLI that handles command execution. +#[derive(Debug)] +pub struct CliApp<Spec: ChainSpecParser, Ext: clap::Args + fmt::Debug> { + cli: Cli<Spec, Ext>, + runner: Option<CliRunner>, + layers: Option<Layers>, + guard: Option<FileWorkerGuard>, +} + +impl<C, Ext> CliApp<C, Ext> +where + C: ChainSpecParser<ChainSpec = ScrollChainSpec>, + Ext: clap::Args + fmt::Debug, +{ + pub(crate) fn new(cli: Cli<C, Ext>) -> Self { + Self { cli, runner: None, layers: Some(Layers::new()), guard: None } + } + + /// Sets the runner for the CLI commander. + /// + /// This replaces any existing runner with the provided one. + pub fn set_runner(&mut self, runner: CliRunner) { + self.runner = Some(runner); + } + + /// Access to tracing layers. + /// + /// Returns a mutable reference to the tracing layers, or error + /// if tracing initialized and layers have detached already. + pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> { + self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized")) + } + + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). 
+ pub fn run(mut self, launcher: impl Launcher<C, Ext>) -> Result<()> { + let runner = match self.runner.take() { + Some(runner) => runner, + None => CliRunner::try_default_runtime()?, + }; + + // add network name to logs dir + // Add network name if available to the logs dir + if let Some(chain_spec) = self.cli.command.chain_spec() { + self.cli.logs.log_file_directory = + self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); + } + + self.init_tracing()?; + + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + + let components = |spec: Arc<ScrollChainSpec>| { + ( + ScrollExecutorProvider::scroll(spec.clone()), + Arc::new(ScrollBeaconConsensus::new(spec)), + ) + }; + + match self.cli.command { + Commands::Node(command) => { + runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) + } + Commands::Import(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode, _>(components)) + } + Commands::Init(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::InitState(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::Stage(command) => runner + .run_command_until_exit(|ctx| command.execute::<ScrollNode, _>(ctx, components)), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::<ScrollNode>()), + Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<ScrollNode>()), + #[cfg(feature = "dev")] + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function stores guard to the struct. + pub fn init_tracing(&mut self) -> Result<()> { + if self.guard.is_none() { + let layers = self.layers.take().unwrap_or_default(); + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); + } + Ok(()) + } +}
diff --git reth/crates/scroll/cli/src/commands/mod.rs scroll-reth/crates/scroll/cli/src/commands/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..979c9e00974ce737d389803b1d8efee5b01dd8e3 --- /dev/null +++ scroll-reth/crates/scroll/cli/src/commands/mod.rs @@ -0,0 +1,76 @@ +#[cfg(feature = "dev")] +mod test_vectors; + +use crate::ScrollChainSpecParser; +use clap::Subcommand; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::{ + config_cmd, db, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, p2p, prune, + stage, +}; +use reth_scroll_chainspec::ScrollChainSpec; +use std::{fmt, sync::Arc}; + +/// Commands to be executed +#[derive(Debug, Subcommand)] +#[allow(clippy::large_enum_variant)] +pub enum Commands< + Spec: ChainSpecParser = ScrollChainSpecParser, + Ext: clap::Args + fmt::Debug = NoArgs, +> { + /// Start the node + #[command(name = "node")] + Node(Box<node::NodeCommand<Spec, Ext>>), + /// Initialize the database from a genesis file. + #[command(name = "init")] + Init(init_cmd::InitCommand<Spec>), + /// Initialize the database from a state dump file. + #[command(name = "init-state")] + InitState(init_state::InitStateCommand<Spec>), + /// This syncs RLP encoded blocks from a file. + #[command(name = "import")] + Import(import::ImportCommand<Spec>), + /// Dumps genesis block JSON configuration to stdout. + DumpGenesis(dump_genesis::DumpGenesisCommand<Spec>), + /// Database debugging utilities + #[command(name = "db")] + Db(db::Command<Spec>), + /// Manipulate individual stages. + #[command(name = "stage")] + Stage(Box<stage::Command<Spec>>), + /// P2P Debugging utilities + #[command(name = "p2p")] + P2P(p2p::Command<Spec>), + /// Write config to stdout + #[command(name = "config")] + Config(config_cmd::Command), + /// Prune according to the configuration without any limits + #[command(name = "prune")] + Prune(prune::PruneCommand<Spec>), + /// Generate Test Vectors + #[cfg(feature = "dev")] + #[command(name = "test-vectors")] + TestVectors(test_vectors::Command), +} + +impl<C: ChainSpecParser<ChainSpec = ScrollChainSpec>, Ext: clap::Args + fmt::Debug> + Commands<C, Ext> +{ + /// Returns the underlying chain being used for commands + pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> { + match self { + Self::Node(cmd) => cmd.chain_spec(), + Self::Init(cmd) => cmd.chain_spec(), + Self::InitState(cmd) => cmd.chain_spec(), + Self::Import(cmd) => cmd.chain_spec(), + Self::DumpGenesis(cmd) => cmd.chain_spec(), + Self::Db(cmd) => cmd.chain_spec(), + Self::Stage(cmd) => cmd.chain_spec(), + Self::P2P(cmd) => cmd.chain_spec(), + Self::Config(_) => None, + Self::Prune(cmd) => cmd.chain_spec(), + #[cfg(feature = "dev")] + Self::TestVectors(_) => None, + } + } +}
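The subcommand surface mirrors upstream reth, parameterized over the Scroll chain-spec parser. A hedged smoke-test sketch, using the crate's `Cli` wrapper shown further below, that the bundled chains are accepted by the global `--chain` flag:

```rust
use reth_scroll_cli::Cli;

fn main() {
    for chain in ["dev", "scroll-mainnet", "scroll-sepolia"] {
        // Argv[0] is arbitrary; only the subcommand and flags matter for parsing.
        let args = ["reth-scroll", "node", "--chain", chain];
        assert!(Cli::try_parse_args_from(args).is_ok(), "failed to parse --chain {chain}");
    }
}
```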
diff --git reth/crates/scroll/cli/src/commands/test_vectors.rs scroll-reth/crates/scroll/cli/src/commands/test_vectors.rs new file mode 100644 index 0000000000000000000000000000000000000000..f48c0f0e2e286aa39333ada95107a5450e6b73f1 --- /dev/null +++ scroll-reth/crates/scroll/cli/src/commands/test_vectors.rs @@ -0,0 +1,72 @@ +//! Command for generating test vectors. + +use clap::{Parser, Subcommand}; +use proptest::test_runner::TestRunner; +use reth_cli_commands::{ + compact_types, + test_vectors::{ + compact, + compact::{ + generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS, + READ_VECTORS as ETH_READ_VECTORS, + }, + tables, + }, +}; +use scroll_alloy_consensus::TxL1Message; + +/// Generate test-vectors for different data types. +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +#[derive(Subcommand, Debug)] +/// `reth test-vectors` subcommands +enum Subcommands { + /// Generates test vectors for specified tables. If no table is specified, generate for all. + Tables { + /// List of table names. Case-sensitive. + names: Vec<String>, + }, + /// Generates test vectors for `Compact` types with `--write`. Reads and checks generated + /// vectors with `--read`. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. + #[arg(long)] + read: bool, + }, +} + +impl Command { + /// Execute the command + pub async fn execute(self) -> eyre::Result<()> { + match self.command { + Subcommands::Tables { names } => { + tables::generate_vectors(names)?; + } + Subcommands::Compact { write, .. } => { + compact_types!( + regular: [ + TxL1Message + ], identifier: [] + ); + + if write { + compact::generate_vectors_with(ETH_GENERATE_VECTORS)?; + compact::generate_vectors_with(GENERATE_VECTORS)?; + } else { + compact::read_vectors_with(ETH_READ_VECTORS)?; + compact::read_vectors_with(READ_VECTORS)?; + } + } + } + Ok(()) + } +}
diff --git reth/crates/scroll/cli/src/lib.rs scroll-reth/crates/scroll/cli/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0ecd5408e63b4742cec9493fb23f3527d7c9f86d --- /dev/null +++ scroll-reth/crates/scroll/cli/src/lib.rs @@ -0,0 +1,121 @@ +//! Scroll CLI implementation. + +mod app; +pub use app::CliApp; + +mod commands; +pub use commands::Commands; + +mod spec; +pub use spec::ScrollChainSpecParser; + +use clap::{value_parser, Parser}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::{launcher::FnLauncher, node::NoArgs}; +use reth_cli_runner::CliRunner; +use reth_db::DatabaseEnv; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; +use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_scroll_chainspec::ScrollChainSpec; +use std::{ffi::OsString, fmt, future::Future, sync::Arc}; + +/// The main scroll cli interface. +/// +/// This is the entrypoint to the executable. +#[derive(Debug, Parser)] +#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Scroll Reth", long_about = None +)] +pub struct Cli<Spec: ChainSpecParser = ScrollChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> +{ + /// The command to run + #[command(subcommand)] + command: Commands<Spec, Ext>, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = Spec::help_message(), + default_value = Spec::SUPPORTED_CHAINS[0], + value_parser = Spec::parser(), + global = true, + )] + chain: Arc<Spec::ChainSpec>, + + /// Add a new instance of a node. + /// + /// Configures the ports of the node to avoid conflicts with the defaults. + /// This is useful for running multiple nodes on the same machine. + /// + /// Max number of instances is 200. It is chosen in a way so that it's not possible to have + /// port numbers that conflict with each other. + /// + /// Changes to the following port numbers: + /// - `DISCOVERY_PORT`: default + `instance` - 1 + /// - `AUTH_PORT`: default + `instance` * 100 - 100 + /// - `HTTP_RPC_PORT`: default - `instance` + 1 + /// - `WS_RPC_PORT`: default + `instance` * 2 - 2 + #[arg(long, value_name = "INSTANCE", global = true, default_value_t = 1, value_parser = value_parser!(u16).range(..=200) + )] + instance: u16, + + #[command(flatten)] + logs: LogArgs, +} + +impl Cli { + /// Parsers only the default CLI arguments + pub fn parse_args() -> Self { + Self::parse() + } + + /// Parsers only the default CLI arguments from the given iterator + pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error> + where + I: IntoIterator<Item = T>, + T: Into<OsString> + Clone, + { + Self::try_parse_from(itr) + } +} + +impl<C, Ext> Cli<C, Ext> +where + C: ChainSpecParser<ChainSpec = ScrollChainSpec>, + Ext: clap::Args + fmt::Debug, +{ + /// Configures the CLI and returns a [`CliApp`] instance. + /// + /// This method is used to prepare the CLI for execution by wrapping it in a + /// [`CliApp`] that can be further configured before running. + pub fn configure(self) -> CliApp<C, Ext> { + CliApp::new(self) + } + + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). 
+ pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut, + Fut: Future<Output = eyre::Result<()>>, + { + self.with_runner(CliRunner::try_default_runtime()?, launcher) + } + + /// Execute the configured cli command with the provided [`CliRunner`]. + pub fn with_runner<L, Fut>(self, runner: CliRunner, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut, + Fut: Future<Output = eyre::Result<()>>, + { + let mut this = self.configure(); + this.set_runner(runner); + this.run(FnLauncher::new::<C, Ext>(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) + } +}
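A binary entrypoint would parse the CLI and hand it a launcher closure; the closure only runs for the `node` subcommand. A minimal sketch of what such a `main` might look like, hedged: `ScrollNode::default()` and `launch_node` are assumptions modeled on upstream reth's node-builder API and are not part of this diff:

```rust
use reth_scroll_cli::Cli;
use reth_scroll_node::ScrollNode;

fn main() -> eyre::Result<()> {
    Cli::parse_args().run(async move |builder, _| {
        // Assumption: `ScrollNode` has a default constructor and the launch context
        // exposes `launch_node`, mirroring the upstream reth ethereum binary.
        let handle = builder.launch_node(ScrollNode::default()).await?;
        handle.node_exit_future.await
    })
}
```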
diff --git reth/crates/scroll/cli/src/spec.rs scroll-reth/crates/scroll/cli/src/spec.rs new file mode 100644 index 0000000000000000000000000000000000000000..d7d8a22b43232330bcdf2b042b131684a9c9e38d --- /dev/null +++ scroll-reth/crates/scroll/cli/src/spec.rs @@ -0,0 +1,21 @@ +use reth_cli::chainspec::{parse_genesis, ChainSpecParser}; +use reth_scroll_chainspec::{ScrollChainSpec, SCROLL_DEV, SCROLL_MAINNET, SCROLL_SEPOLIA}; +use std::sync::Arc; + +/// The parser for the Scroll chain specification. +#[derive(Debug, Clone)] +pub struct ScrollChainSpecParser; + +impl ChainSpecParser for ScrollChainSpecParser { + type ChainSpec = ScrollChainSpec; + const SUPPORTED_CHAINS: &'static [&'static str] = &["dev", "scroll-mainnet", "scroll-sepolia"]; + + fn parse(s: &str) -> eyre::Result<Arc<Self::ChainSpec>> { + Ok(match s { + "dev" => SCROLL_DEV.clone(), + "scroll-mainnet" => SCROLL_MAINNET.clone(), + "scroll-sepolia" => SCROLL_SEPOLIA.clone(), + _ => Arc::new(ScrollChainSpec::from_custom_genesis(parse_genesis(s)?)), + }) + } +}
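The parser resolves the three built-in names to the bundled specs and falls back to `parse_genesis` plus `ScrollChainSpec::from_custom_genesis` for anything else, so a file path (or inline JSON) carrying the `scroll` extra fields also works. A short sketch:

```rust
use alloy_chains::Chain;
use reth_cli::chainspec::ChainSpecParser;
use reth_scroll_cli::ScrollChainSpecParser;

fn main() -> eyre::Result<()> {
    // Built-in names resolve to the bundled, lazily initialized specs.
    let sepolia = ScrollChainSpecParser::parse("scroll-sepolia")?;
    assert_eq!(sepolia.chain, Chain::scroll_sepolia());

    // Anything else is treated as a custom genesis; the placeholder path below
    // would need to point at a genesis file with the `scroll` extra fields.
    let custom = ScrollChainSpecParser::parse("./custom-genesis.json");
    println!("custom genesis parsed: {}", custom.is_ok());
    Ok(())
}
```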
diff --git reth/crates/scroll/consensus/Cargo.toml scroll-reth/crates/scroll/consensus/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0456665bafab2a9bd3ba95d5bcdb4a2d31d79a5c --- /dev/null +++ scroll-reth/crates/scroll/consensus/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "reth-scroll-consensus" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-consensus.workspace = true +alloy-primitives = { workspace = true, features = ["getrandom"] } + +# reth +reth-chainspec.workspace = true +reth-consensus.workspace = true +reth-consensus-common.workspace = true +reth-ethereum-consensus.workspace = true +reth-execution-types.workspace = true +reth-primitives-traits.workspace = true + +# scroll +reth-scroll-primitives = { workspace = true, default-features = false } +scroll-alloy-consensus.workspace = true +scroll-alloy-hardforks.workspace = true + +# misc +thiserror.workspace = true +tracing.workspace = true + +[package.metadata.cargo-udeps.ignore] +normal = ["reth-primitives"] + +[dev-dependencies] +reth-scroll-chainspec.workspace = true
diff --git reth/crates/scroll/consensus/src/constants.rs scroll-reth/crates/scroll/consensus/src/constants.rs new file mode 100644 index 0000000000000000000000000000000000000000..188aeb896d5c9277f1236d62c3f25b3f73a59e9e --- /dev/null +++ scroll-reth/crates/scroll/consensus/src/constants.rs @@ -0,0 +1,13 @@ +use alloy_primitives::U256; + +/// The maximum value Rollup fee. +pub const MAX_ROLLUP_FEE: U256 = U256::from_limbs([u64::MAX, 0, 0, 0]); + +/// The block difficulty for in turn signing in the Clique consensus. +pub const CLIQUE_IN_TURN_DIFFICULTY: U256 = U256::from_limbs([2, 0, 0, 0]); + +/// The block difficulty for out of turn signing in the Clique consensus. +pub const CLIQUE_NO_TURN_DIFFICULTY: U256 = U256::from_limbs([1, 0, 0, 0]); + +/// Maximum allowed base fee. We would only go above this if L1 base fee hits 2931 Gwei. +pub const SCROLL_MAXIMUM_BASE_FEE: u64 = 10000000000;
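The base-fee cap is denominated in wei, so the limit works out to 10 Gwei per gas; per the comment above, the L1 base fee would have to reach roughly 2931 Gwei before Scroll's formula could exceed it. A tiny sketch exercising the exported constants:

```rust
use alloy_primitives::U256;
use reth_scroll_consensus::{
    CLIQUE_IN_TURN_DIFFICULTY, CLIQUE_NO_TURN_DIFFICULTY, MAX_ROLLUP_FEE, SCROLL_MAXIMUM_BASE_FEE,
};

fn main() {
    // 10_000_000_000 wei per gas == 10 Gwei.
    assert_eq!(SCROLL_MAXIMUM_BASE_FEE, 10 * 10u64.pow(9));
    // The rollup fee is capped at u64::MAX wei.
    assert_eq!(MAX_ROLLUP_FEE, U256::from(u64::MAX));
    // Pre-EuclidV2 headers carry Clique difficulties: 2 for in-turn, 1 for out-of-turn signing.
    assert_eq!(CLIQUE_IN_TURN_DIFFICULTY, U256::from(2u64));
    assert_eq!(CLIQUE_NO_TURN_DIFFICULTY, U256::from(1u64));
}
```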
diff --git reth/crates/scroll/consensus/src/error.rs scroll-reth/crates/scroll/consensus/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..09eca6d9b715438b9aefc64569c9fad2d63b1ee2 --- /dev/null +++ scroll-reth/crates/scroll/consensus/src/error.rs @@ -0,0 +1,63 @@ +use crate::constants::SCROLL_MAXIMUM_BASE_FEE; + +use alloy_primitives::{Address, B256, B64, U256}; +use reth_consensus::ConsensusError; + +/// Scroll consensus error. +#[derive(Debug, Clone, thiserror::Error)] +pub enum ScrollConsensusError { + /// L1 [`ConsensusError`], that also occurs on L2. + #[error(transparent)] + Eth(#[from] ConsensusError), + /// Invalid L1 messages order. + #[error("invalid L1 message order")] + InvalidL1MessageOrder, + /// Block has non zero coinbase. + #[error("block coinbase not zero: {0}")] + CoinbaseNotZero(Address), + /// Block has non zero nonce. + #[error("block nonce not zero: {0:?}")] + NonceNotZero(Option<B64>), + /// Block has invalid clique nonce. + #[error("block nonce should be 0x0 or 0xffffffffffffffff: {0:?}")] + InvalidCliqueNonce(Option<B64>), + /// Block has non zero mix hash. + #[error("block mix hash not zero: {0:?}")] + MixHashNotZero(Option<B256>), + /// Block difficulty is not one. + #[error("block difficulty not one: {0}")] + DifficultyNotOne(U256), + /// Block has invalid clique difficulty. + #[error("block difficulty should be 1 or 2: {0}")] + InvalidCliqueDifficulty(U256), + /// Block extra data missing vanity. + #[error("block extra data missing vanity")] + MissingVanity, + /// Block extra data missing signature. + #[error("block extra data missing signature")] + MissingSignature, + /// Block extra data with invalid checkpoint signers. + #[error("block extra data contains invalid checkpoint signers")] + InvalidCheckpointSigners, + /// Block base fee present before Curie. + #[error("block base fee is set before Curie fork activation")] + UnexpectedBaseFee, + /// Block base fee over limit. + #[error("block base fee is over limit of {SCROLL_MAXIMUM_BASE_FEE}")] + BaseFeeOverLimit, + /// Block body has non-empty withdrawals list. + #[error("non-empty block body withdrawals list")] + WithdrawalsNonEmpty, + /// Chain spec yielded unexpected blob params. + #[error("unexpected blob params at timestamp")] + UnexpectedBlobParams, +} + +impl From<ScrollConsensusError> for ConsensusError { + fn from(value: ScrollConsensusError) -> Self { + match value { + ScrollConsensusError::Eth(eth) => eth, + err => Self::Other(err.to_string()), + } + } +}
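Scroll-specific failures stay in their own error type but remain convertible into reth's `ConsensusError`, which is how the validators below report them: wrapped L1 errors pass through unchanged, everything else is stringified into `ConsensusError::Other`. A small sketch of the conversion:

```rust
use reth_consensus::ConsensusError;
use reth_scroll_consensus::ScrollConsensusError;

fn main() {
    // A Scroll-specific variant is surfaced as a stringified `Other` error.
    let scroll_err: ConsensusError = ScrollConsensusError::InvalidL1MessageOrder.into();
    assert!(matches!(scroll_err, ConsensusError::Other(_)));

    // A wrapped L1 consensus error passes through unchanged.
    let eth_err: ConsensusError =
        ScrollConsensusError::Eth(ConsensusError::TheMergeOmmerRootIsNotEmpty).into();
    assert!(matches!(eth_err, ConsensusError::TheMergeOmmerRootIsNotEmpty));
}
```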
diff --git reth/crates/scroll/consensus/src/lib.rs scroll-reth/crates/scroll/consensus/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e29d6c461a8e40840ee5ff0c5fa39462eedb4cc --- /dev/null +++ scroll-reth/crates/scroll/consensus/src/lib.rs @@ -0,0 +1,14 @@ +//! Scroll consensus implementation. + +extern crate alloc; + +mod constants; +pub use constants::{ + CLIQUE_IN_TURN_DIFFICULTY, CLIQUE_NO_TURN_DIFFICULTY, MAX_ROLLUP_FEE, SCROLL_MAXIMUM_BASE_FEE, +}; + +mod error; +pub use error::ScrollConsensusError; + +mod validation; +pub use validation::ScrollBeaconConsensus;
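The exported pieces compose around the chain spec: `ScrollBeaconConsensus` takes an `Arc` of the spec so that fork-dependent rules (Clique-shaped headers before EuclidV2, Curie base-fee checks, Euclid state-root validation) can be selected per block. A minimal construction sketch, in the shape the dev-dependency on `reth-scroll-chainspec` above suggests for tests:

```rust
use reth_scroll_chainspec::SCROLL_MAINNET;
use reth_scroll_consensus::ScrollBeaconConsensus;

fn main() {
    // The spec is shared via `Arc`, so the same instance can back the executor,
    // payload builder and consensus checks.
    let consensus = ScrollBeaconConsensus::new(SCROLL_MAINNET.clone());
    // `consensus` implements `HeaderValidator`, `Consensus` and `FullConsensus`
    // for Scroll primitives; see `validation.rs` below.
    let _ = consensus;
}
```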
diff --git reth/crates/scroll/consensus/src/validation.rs scroll-reth/crates/scroll/consensus/src/validation.rs new file mode 100644 index 0000000000000000000000000000000000000000..786a47b10fa6dcdbbd7423c570af842886b40484 --- /dev/null +++ scroll-reth/crates/scroll/consensus/src/validation.rs @@ -0,0 +1,842 @@ +use crate::{ + constants::SCROLL_MAXIMUM_BASE_FEE, error::ScrollConsensusError, CLIQUE_IN_TURN_DIFFICULTY, + CLIQUE_NO_TURN_DIFFICULTY, +}; +use alloc::sync::Arc; + +use alloy_consensus::{BlockHeader as _, TxReceipt, EMPTY_OMMER_ROOT_HASH}; +use alloy_primitives::{b64, Address, B256, B64, U256}; +use core::fmt::Debug; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_consensus::{ + validate_state_root, Consensus, ConsensusError, FullConsensus, HeaderValidator, +}; +use reth_consensus_common::validation::{ + validate_against_parent_hash_number, validate_body_against_header, validate_header_gas, +}; +use reth_execution_types::BlockExecutionResult; +use reth_primitives_traits::{ + constants::{GAS_LIMIT_BOUND_DIVISOR, MINIMUM_GAS_LIMIT}, + receipt::gas_spent_by_transactions, + Block, BlockBody, BlockHeader, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, + SealedHeader, SignedTransaction, +}; +use reth_scroll_primitives::ScrollReceipt; +use scroll_alloy_consensus::ScrollTransaction; +use scroll_alloy_hardforks::ScrollHardforks; + +/// Scroll consensus implementation. +/// +/// Provides basic checks as outlined in the execution specs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ScrollBeaconConsensus<ChainSpec> { + /// Configuration + chain_spec: Arc<ChainSpec>, +} + +impl<ChainSpec> ScrollBeaconConsensus<ChainSpec> { + /// Create a new instance of [`ScrollBeaconConsensus`] + pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { + Self { chain_spec } + } +} + +impl< + ChainSpec: EthChainSpec + ScrollHardforks, + N: NodePrimitives<Receipt = ScrollReceipt, SignedTx: ScrollTransaction>, + > FullConsensus<N> for ScrollBeaconConsensus<ChainSpec> +{ + fn validate_block_post_execution( + &self, + block: &RecoveredBlock<N::Block>, + result: &BlockExecutionResult<N::Receipt>, + ) -> Result<(), ConsensusError> { + // verify the block gas used + let cumulative_gas_used = + result.receipts.last().map(|r| r.cumulative_gas_used()).unwrap_or(0); + if block.gas_used() != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used() }, + gas_spent_by_tx: gas_spent_by_transactions(&result.receipts), + }); + } + + // verify the receipts logs bloom and root + #[allow(clippy::collapsible_if)] + if self.chain_spec.is_byzantium_active_at_block(block.header().number()) { + if let Err(error) = reth_ethereum_consensus::verify_receipts( + block.header().receipts_root(), + block.header().logs_bloom(), + &result.receipts, + ) { + tracing::debug!( + %error, + ?result.receipts, + header_receipt_root = ?block.header().receipts_root(), + header_bloom = ?block.header().logs_bloom(), + "failed to verify receipts" + ); + return Err(error); + } + } + + Ok(()) + } +} + +/// Following fields should be checked on body: +/// - Verify no ommers are present and hash to the header ommer root. +/// - Verify transactions trie root is valid. +/// - Validate L1 messages: should be at the start of the list of transactions and been continuous +/// in regard to the queue index. 
+impl<ChainSpec, B> Consensus<B> for ScrollBeaconConsensus<ChainSpec> +where + B: Block, + <B::Body as BlockBody>::Transaction: ScrollTransaction, + ChainSpec: EthChainSpec + ScrollHardforks, +{ + type Error = ConsensusError; + + fn validate_body_against_header( + &self, + body: &B::Body, + header: &SealedHeader<B::Header>, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header.header()) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock<B>) -> Result<(), ConsensusError> { + // Check no ommers. + let ommers_len = block.body().ommers().map(|o| o.len()).unwrap_or_default(); + if ommers_len > 0 { + return Err(ConsensusError::Other("uncles not allowed".to_string())) + } + + // Check ommers hash + let ommers_hash = block.body().calculate_ommers_root(); + if Some(block.ommers_hash()) != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: block.ommers_hash(), + } + .into(), + )) + } + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + // Check withdrawals are empty + if block.body().withdrawals().is_some() { + return Err(ConsensusError::Other(ScrollConsensusError::WithdrawalsNonEmpty.to_string())) + } + + // Check L1 messages. + let ts = block.header().timestamp(); + validate_l1_messages( + block.body().transactions(), + self.chain_spec.is_euclid_v2_active_at_timestamp(ts), + )?; + + Ok(()) + } +} + +impl<ChainSpec: EthChainSpec + ScrollHardforks, H: BlockHeader> HeaderValidator<H> + for ScrollBeaconConsensus<ChainSpec> +{ + fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> { + validate_header_timestamp(header.header())?; + validate_header_fields(header.header(), &self.chain_spec)?; + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), &self.chain_spec)?; + Ok(()) + } + + fn validate_header_against_parent( + &self, + header: &SealedHeader<H>, + parent: &SealedHeader<H>, + ) -> Result<(), ConsensusError> { + validate_against_parent_hash_number(header.header(), parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; + validate_against_parent_gas_limit(header.header(), parent.header())?; + + // ensure that the blob gas fields for this block + if self.chain_spec.blob_params_at_timestamp(header.timestamp()).is_some() { + return Err(ConsensusError::Other( + ScrollConsensusError::UnexpectedBlobParams.to_string(), + )) + } + + Ok(()) + } + + fn validate_state_root(&self, header: &H, root: B256) -> Result<(), ConsensusError> { + if self.chain_spec.is_euclid_active_at_timestamp(header.timestamp()) { + validate_state_root(header, root)?; + } + + Ok(()) + } +} + +#[inline] +fn validate_header_fields<H: BlockHeader, ChainSpec: EthChainSpec + ScrollHardforks>( + header: &H, + chain_spec: ChainSpec, +) -> Result<(), ScrollConsensusError> { + // Common checks to pre and post Euclid v2. 
+ if header.ommers_hash() != EMPTY_OMMER_ROOT_HASH { + return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty.into()) + } + if header.mix_hash() != Some(B256::ZERO) { + return Err(ScrollConsensusError::MixHashNotZero(header.mix_hash())) + } + + if chain_spec.is_euclid_v2_active_at_timestamp(header.timestamp()) { + verify_header_fields_post_euclid_v2(header)?; + } else { + let clique_config = + chain_spec.genesis().config.clique.expect("clique config required pre euclid v2"); + let epoch = clique_config.epoch.expect("epoch required pre euclid v2"); + verify_header_fields_pre_euclid_v2(header, epoch)?; + } + + Ok(()) +} + +/// Verify the header's field for post Euclid v2 blocks. +#[inline] +fn verify_header_fields_post_euclid_v2<H: BlockHeader>( + header: &H, +) -> Result<(), ScrollConsensusError> { + if header.beneficiary() != Address::ZERO { + return Err(ScrollConsensusError::CoinbaseNotZero(header.beneficiary())) + } + if header.nonce() != Some(B64::ZERO) { + return Err(ScrollConsensusError::NonceNotZero(header.nonce())) + } + if header.difficulty() != U256::ONE { + return Err(ScrollConsensusError::DifficultyNotOne(header.difficulty())) + } + if !header.extra_data().is_empty() { + return Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data().len() }.into()) + } + + Ok(()) +} + +/// Verify the header's field for pre Euclid v2 blocks. +#[inline] +fn verify_header_fields_pre_euclid_v2<H: BlockHeader>( + header: &H, + epoch: u64, +) -> Result<(), ScrollConsensusError> { + let is_checkpoint = header.number().is_multiple_of(epoch); + if is_checkpoint && header.beneficiary() != Address::ZERO { + return Err(ScrollConsensusError::CoinbaseNotZero(header.beneficiary())) + } + if header.nonce() != Some(B64::ZERO) && header.nonce() != Some(b64!("ffffffffffffffff")) { + return Err(ScrollConsensusError::InvalidCliqueNonce(header.nonce())) + } + if is_checkpoint && header.nonce() != Some(B64::ZERO) { + return Err(ScrollConsensusError::NonceNotZero(header.nonce())) + } + if header.extra_data().len() < 32 { + return Err(ScrollConsensusError::MissingVanity) + } + if header.extra_data().len() < 32 + 65 { + return Err(ScrollConsensusError::MissingSignature) + } + let signer_bytes = header.extra_data().len() - 32 - 65; + if !is_checkpoint && signer_bytes > 0 { + return Err(ScrollConsensusError::InvalidCheckpointSigners) + } + let difficulty = header.difficulty(); + if difficulty != CLIQUE_IN_TURN_DIFFICULTY && difficulty != CLIQUE_NO_TURN_DIFFICULTY { + return Err(ScrollConsensusError::InvalidCliqueDifficulty(difficulty)) + } + + Ok(()) +} + +/// Validates the timestamp of the header, which should not be in the future. +#[inline] +fn validate_header_timestamp<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> { + let now = std::time::SystemTime::now(); + let since_unix_epoch = now.duration_since(std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs(); + if header.timestamp() > since_unix_epoch { + return Err(ConsensusError::TimestampIsInPast { + parent_timestamp: since_unix_epoch, + timestamp: header.timestamp(), + }) + } + Ok(()) +} + +/// Ensure the EIP-1559 base fee is set if the Curie hardfork is active. 
+#[inline] +fn validate_header_base_fee<H: BlockHeader, ChainSpec: ScrollHardforks>( + header: &H, + chain_spec: &ChainSpec, +) -> Result<(), ScrollConsensusError> { + if chain_spec.is_curie_active_at_block(header.number()) { + if header.base_fee_per_gas().is_none() { + return Err(ConsensusError::BaseFeeMissing.into()) + } + // note: we do not verify L2 base fee, the sequencer has the + // right to set any base fee below the maximum. L2 base fee + // is not subject to L2 consensus or zk verification. + if header.base_fee_per_gas().expect("checked") > SCROLL_MAXIMUM_BASE_FEE { + return Err(ScrollConsensusError::BaseFeeOverLimit) + } + } + if !chain_spec.is_curie_active_at_block(header.number()) && header.base_fee_per_gas().is_some() + { + return Err(ScrollConsensusError::UnexpectedBaseFee) + } + Ok(()) +} + +/// Validates the timestamp against the parent to make sure it is in the past. +/// In Scroll, we can have parent.timestamp == header.timestamp which is why +/// we modify this validation compared to +/// [`reth_consensus_common::validation::validate_against_parent_timestamp`]. +#[inline] +fn validate_against_parent_timestamp<H: BlockHeader>( + header: &H, + parent: &H, +) -> Result<(), ConsensusError> { + if header.timestamp() < parent.timestamp() { + return Err(ConsensusError::TimestampIsInPast { + parent_timestamp: parent.timestamp(), + timestamp: header.timestamp(), + }) + } + Ok(()) +} + +/// Validates the gas limit of the block against the parent. +#[inline] +fn validate_against_parent_gas_limit<H: BlockHeader>( + header: &H, + parent: &H, +) -> Result<(), ConsensusError> { + let diff = header.gas_limit().abs_diff(parent.gas_limit()); + let limit = parent.gas_limit() / GAS_LIMIT_BOUND_DIVISOR; + if diff > limit { + return if header.gas_limit() > parent.gas_limit() { + Err(ConsensusError::GasLimitInvalidIncrease { + parent_gas_limit: parent.gas_limit(), + child_gas_limit: header.gas_limit(), + }) + } else { + Err(ConsensusError::GasLimitInvalidDecrease { + parent_gas_limit: parent.gas_limit(), + child_gas_limit: header.gas_limit(), + }) + } + } + // Check that the gas limit is above the minimum allowed gas limit. + if header.gas_limit() < MINIMUM_GAS_LIMIT { + return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit() }) + } + Ok(()) +} + +/// Validate the L1 messages by checking they are only present that the start of the block and only +/// have increasing queue index. +#[inline] +fn validate_l1_messages<Tx: SignedTransaction + ScrollTransaction>( + txs: &[Tx], + is_euclid_v2: bool, +) -> Result<(), ScrollConsensusError> { + // Check L1 messages are only at the start of the block and correctly ordered. + let mut saw_l2_transaction = false; + let mut queue_index = txs + .iter() + .find(|tx| tx.is_l1_message()) + .and_then(|tx| tx.queue_index()) + .unwrap_or_default(); + + // starting at EuclidV2, we don't skip L1 messages. + let l1_message_index_check: fn(u64, u64) -> bool = if is_euclid_v2 { + |tx_queue_index, queue_index| tx_queue_index != queue_index + } else { + |tx_queue_index, queue_index| tx_queue_index < queue_index + }; + + for tx in txs { + // Check index is strictly increasing pre EuclidV2 and sequential post EuclidV2. + if tx.is_l1_message() { + let tx_queue_index = tx.queue_index().expect("is_l1_message"); + if l1_message_index_check(tx_queue_index, queue_index) { + return Err(ScrollConsensusError::InvalidL1MessageOrder); + } + queue_index = tx_queue_index + 1; + } + + // Check correct ordering. 
+ if tx.is_l1_message() && saw_l2_transaction { + return Err(ScrollConsensusError::InvalidL1MessageOrder); + } + saw_l2_transaction = !tx.is_l1_message(); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ScrollConsensusError; + + use alloy_consensus::{Header, Signed, TxEip1559}; + use alloy_primitives::{b64, Address, Bloom, Bytes, Signature, B256, U256}; + use reth_consensus::ConsensusError; + use reth_primitives_traits::constants::{GAS_LIMIT_BOUND_DIVISOR, MINIMUM_GAS_LIMIT}; + use reth_scroll_chainspec::SCROLL_MAINNET; + use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message}; + + fn create_test_header() -> Header { + Header { + parent_hash: B256::random(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: Address::ZERO, + state_root: B256::random(), + transactions_root: B256::random(), + receipts_root: B256::random(), + logs_bloom: Bloom::default(), + difficulty: U256::ONE, + number: 1, + gas_limit: 30000000, + gas_used: 0, + timestamp: 1000, + extra_data: Bytes::new(), + mix_hash: B256::ZERO, + nonce: B64::ZERO, + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + } + } + + #[test] + fn test_validate_header_timestamp_success() { + let header = create_test_header(); + assert!(validate_header_timestamp(&header).is_ok()); + } + + #[test] + fn test_validate_header_timestamp_future() { + let mut header = create_test_header(); + // timestamp in the future. + header.timestamp = u64::MAX; + + let result = validate_header_timestamp(&header); + assert!(matches!(result, Err(ConsensusError::TimestampIsInPast { .. }))); + } + + #[test] + fn test_verify_header_fields_post_euclid_v2_success() { + let header = create_test_header(); + assert!(verify_header_fields_post_euclid_v2(&header).is_ok()); + } + + #[test] + fn test_verify_header_fields_post_euclid_v2_coinbase_not_zero() { + let mut header = create_test_header(); + header.beneficiary = Address::random(); + + let result = verify_header_fields_post_euclid_v2(&header); + assert!(matches!(result, Err(ScrollConsensusError::CoinbaseNotZero(_)))); + } + + #[test] + fn test_verify_header_fields_post_euclid_v2_nonce_not_zero() { + let mut header = create_test_header(); + header.nonce = b64!("0123456789abcdef"); + + let result = verify_header_fields_post_euclid_v2(&header); + assert!(matches!(result, Err(ScrollConsensusError::NonceNotZero(_)))); + } + + #[test] + fn test_verify_header_fields_post_euclid_v2_difficulty_not_one() { + let mut header = create_test_header(); + header.difficulty = U256::from(2); + + let result = verify_header_fields_post_euclid_v2(&header); + assert!(matches!(result, Err(ScrollConsensusError::DifficultyNotOne(_)))); + } + + #[test] + fn test_verify_header_fields_post_euclid_v2_extra_data_not_empty() { + let mut header = create_test_header(); + header.extra_data = Bytes::from(vec![1, 2, 3]); + + let result = verify_header_fields_post_euclid_v2(&header); + assert!(matches!( + result, + Err(ScrollConsensusError::Eth(ConsensusError::ExtraDataExceedsMax { .. })) + )); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_success() { + let mut header = create_test_header(); + // valid extra data (32 bytes vanity + 65 bytes signature). 
+ let mut extra_data = vec![0u8; 32]; + extra_data.extend_from_slice(&[0u8; 65]); + header.extra_data = Bytes::from(extra_data); + + assert!(verify_header_fields_pre_euclid_v2(&header, 30000).is_ok()); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_checkpoint_coinbase_not_zero() { + let mut header = create_test_header(); + // checkpoint block. + header.number = 0; + header.beneficiary = Address::random(); + let mut extra_data = vec![0u8; 32]; + extra_data.extend_from_slice(&[0u8; 65]); + header.extra_data = Bytes::from(extra_data); + + let result = verify_header_fields_pre_euclid_v2(&header, 30000); + assert!(matches!(result, Err(ScrollConsensusError::CoinbaseNotZero(_)))); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_invalid_nonce() { + let mut header = create_test_header(); + // invalid nonce. + header.nonce = b64!("1234567890abcdef"); + let mut extra_data = vec![0u8; 32]; + extra_data.extend_from_slice(&[0u8; 65]); + header.extra_data = Bytes::from(extra_data); + + let result = verify_header_fields_pre_euclid_v2(&header, 30000); + assert!(matches!(result, Err(ScrollConsensusError::InvalidCliqueNonce(_)))); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_missing_vanity() { + let mut header = create_test_header(); + // vanity too short. + header.extra_data = Bytes::from(vec![0u8; 31]); + + let result = verify_header_fields_pre_euclid_v2(&header, 30000); + assert!(matches!(result, Err(ScrollConsensusError::MissingVanity))); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_missing_signature() { + let mut header = create_test_header(); + // signature too short. + header.extra_data = Bytes::from(vec![0u8; 32]); + + let result = verify_header_fields_pre_euclid_v2(&header, 30000); + assert!(matches!(result, Err(ScrollConsensusError::MissingSignature))); + } + + #[test] + fn test_verify_header_fields_pre_euclid_v2_invalid_difficulty() { + let mut header = create_test_header(); + // invalid difficulty. + header.difficulty = U256::from(3); + let mut extra_data = vec![0u8; 32]; + extra_data.extend_from_slice(&[0u8; 65]); + header.extra_data = Bytes::from(extra_data); + + let result = verify_header_fields_pre_euclid_v2(&header, 30000); + assert!(matches!(result, Err(ScrollConsensusError::InvalidCliqueDifficulty(_)))); + } + + #[test] + fn test_validate_against_parent_timestamp_success() { + let parent = create_test_header(); + let mut header = create_test_header(); + header.timestamp = parent.timestamp + 1; + + assert!(validate_against_parent_timestamp(&header, &parent).is_ok()); + } + + #[test] + fn test_validate_against_parent_timestamp_same_time() { + let parent = create_test_header(); + let header = create_test_header(); + + assert!(validate_against_parent_timestamp(&header, &parent).is_ok()); + } + + #[test] + fn test_validate_against_parent_timestamp_in_past() { + let parent = create_test_header(); + let mut header = create_test_header(); + header.timestamp = parent.timestamp - 1; + + let result = validate_against_parent_timestamp(&header, &parent); + assert!(matches!(result, Err(ConsensusError::TimestampIsInPast { .. }))); + } + + #[test] + fn test_validate_against_parent_gas_limit_success() { + let parent = create_test_header(); + let mut header = create_test_header(); + // small gas increase. 
+ header.gas_limit = parent.gas_limit + 100; + + assert!(validate_against_parent_gas_limit(&header, &parent).is_ok()); + } + + #[test] + fn test_validate_against_parent_gas_limit_too_high_increase() { + let parent = create_test_header(); + let mut header = create_test_header(); + header.gas_limit = parent.gas_limit + parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + + let result = validate_against_parent_gas_limit(&header, &parent); + assert!(matches!(result, Err(ConsensusError::GasLimitInvalidIncrease { .. }))); + } + + #[test] + fn test_validate_against_parent_gas_limit_too_high_decrease() { + let parent = create_test_header(); + let mut header = create_test_header(); + header.gas_limit = parent.gas_limit - parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + + let result = validate_against_parent_gas_limit(&header, &parent); + assert!(matches!(result, Err(ConsensusError::GasLimitInvalidDecrease { .. }))); + } + + #[test] + fn test_validate_against_parent_gas_limit_below_minimum() { + let mut parent = create_test_header(); + let mut header = create_test_header(); + parent.gas_limit = MINIMUM_GAS_LIMIT + 1; + header.gas_limit = MINIMUM_GAS_LIMIT - 1; + + let result = validate_against_parent_gas_limit(&header, &parent); + dbg!(&result); + assert!(matches!(result, Err(ConsensusError::GasLimitInvalidMinimum { .. }))); + } + + #[test] + fn test_validate_l1_messages_success() { + let txs: Vec<ScrollTxEnvelope> = vec![ + TxL1Message { queue_index: 0, ..Default::default() }.into(), + TxL1Message { queue_index: 1, ..Default::default() }.into(), + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + ]; + + assert!(validate_l1_messages(&txs, true).is_ok()); + assert!(validate_l1_messages(&txs, false).is_ok()); + } + + #[test] + fn test_validate_l1_messages_empty() { + let txs: Vec<ScrollTxEnvelope> = vec![]; + assert!(validate_l1_messages(&txs, true).is_ok()); + assert!(validate_l1_messages(&txs, false).is_ok()); + } + + #[test] + fn test_validate_l1_messages_only_l2() { + let txs: Vec<ScrollTxEnvelope> = vec![ + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + ]; + + assert!(validate_l1_messages(&txs, true).is_ok()); + assert!(validate_l1_messages(&txs, false).is_ok()); + } + + #[test] + fn test_validate_l1_messages_invalid_order() { + let txs: Vec<ScrollTxEnvelope> = vec![ + Signed::new_unchecked( + TxEip1559::default(), + Signature::new(U256::ZERO, U256::ZERO, false), + B256::random(), + ) + .into(), + TxL1Message { queue_index: 0, ..Default::default() }.into(), + ]; + + let result = validate_l1_messages(&txs, true); + assert!(matches!(result, Err(ScrollConsensusError::InvalidL1MessageOrder))); + let result = validate_l1_messages(&txs, false); + assert!(matches!(result, Err(ScrollConsensusError::InvalidL1MessageOrder))); + } + + #[test] + fn test_validate_l1_messages_non_sequential_queue_index() { + let txs: 
Vec<ScrollTxEnvelope> = vec![ + TxL1Message { queue_index: 0, ..Default::default() }.into(), + TxL1Message { queue_index: 2, ..Default::default() }.into(), + ]; + + // ok as it's not decreasing. + assert!(validate_l1_messages(&txs, false).is_ok()); + // not ok as it's not sequential. + let result = validate_l1_messages(&txs, true); + assert!(matches!(result, Err(ScrollConsensusError::InvalidL1MessageOrder))); + } + + #[test] + fn test_validate_l1_messages_decreasing_queue_index() { + let txs: Vec<ScrollTxEnvelope> = vec![ + TxL1Message { queue_index: 1, ..Default::default() }.into(), + TxL1Message { queue_index: 0, ..Default::default() }.into(), + ]; + + let result = validate_l1_messages(&txs, true); + assert!(matches!(result, Err(ScrollConsensusError::InvalidL1MessageOrder))); + let result = validate_l1_messages(&txs, false); + assert!(matches!(result, Err(ScrollConsensusError::InvalidL1MessageOrder))); + } + + #[test] + fn test_validate_header_base_fee_before_curie() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // pre Curie. + header.number = 500; + header.base_fee_per_gas = Some(1000000000); + + let result = validate_header_base_fee(&header, &chain_spec); + assert!(matches!(result, Err(ScrollConsensusError::UnexpectedBaseFee))); + } + + #[test] + fn test_validate_header_base_fee_after_curie_missing() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // post Curie. + header.number = 7096837; + header.base_fee_per_gas = None; + + let result = validate_header_base_fee(&header, &chain_spec); + assert!(matches!(result, Err(ScrollConsensusError::Eth(ConsensusError::BaseFeeMissing)))); + } + + #[test] + fn test_validate_header_base_fee_after_curie_over_limit() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // post Curie. + header.number = 7096837; + header.base_fee_per_gas = Some(SCROLL_MAXIMUM_BASE_FEE + 1); + + let result = validate_header_base_fee(&header, &chain_spec); + assert!(matches!(result, Err(ScrollConsensusError::BaseFeeOverLimit))); + } + + #[test] + fn test_validate_header_base_fee_after_curie_valid() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // post Curie. + header.number = 7096837; + header.base_fee_per_gas = Some(1000000000); + + let result = validate_header_base_fee(&header, &chain_spec); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_header_fields_pre_euclid_v2() { + let mut header = create_test_header(); + // pre Euclid v2. + header.timestamp = 1745305199; + // valid extra data for pre-euclid v2. + let mut extra_data = vec![0u8; 32]; + extra_data.extend_from_slice(&[0u8; 65]); + header.extra_data = Bytes::from(extra_data); + + assert!(verify_header_fields_pre_euclid_v2(&header, 30000).is_ok()); + } + + #[test] + fn test_validate_header_fields_post_euclid_v2() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // post Euclid v2. + header.timestamp = 1745305201; + + assert!(validate_header_fields(&header, &chain_spec).is_ok()); + } + + #[test] + fn test_validate_header_fields_mix_hash_not_zero() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // invalid mix hash. 
+ header.mix_hash = B256::random(); + + let result = validate_header_fields(&header, &chain_spec); + assert!(matches!(result, Err(ScrollConsensusError::MixHashNotZero(_)))); + } + + #[test] + fn test_validate_header_fields_ommers_hash_not_empty() { + let chain_spec = SCROLL_MAINNET.clone(); + + let mut header = create_test_header(); + // invalid ommer hash. + header.ommers_hash = B256::random(); + + let result = validate_header_fields(&header, &chain_spec); + assert!(matches!( + result, + Err(ScrollConsensusError::Eth(ConsensusError::TheMergeOmmerRootIsNotEmpty)) + )); + } +}
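The most Scroll-specific part of the pre-execution validation above is the L1-message rule: L1 messages must sit at the front of the block, their queue indices may never decrease, and from EuclidV2 onward they must be exactly sequential (no skipped messages). The following dependency-free sketch mirrors that rule outside the crate; the `Tx` enum, function name, and error strings are illustrative stand-ins, not part of the scroll-reth API.

```rust
/// Illustrative sketch of the ordering rule enforced by `validate_l1_messages`
/// (the `Tx` enum and error strings are stand-ins, not the crate's types).
#[derive(Debug)]
enum Tx {
    L1 { queue_index: u64 },
    L2,
}

fn check_l1_message_order(txs: &[Tx], is_euclid_v2: bool) -> Result<(), &'static str> {
    let mut saw_l2 = false;
    // The expected index starts at the first L1 message's queue index.
    let mut expected = txs
        .iter()
        .find_map(|tx| match tx {
            Tx::L1 { queue_index } => Some(*queue_index),
            Tx::L2 => None,
        })
        .unwrap_or_default();

    for tx in txs {
        match tx {
            Tx::L1 { queue_index } => {
                // L1 messages must all come before the first L2 transaction.
                if saw_l2 {
                    return Err("L1 message after an L2 transaction");
                }
                // Pre-EuclidV2 the index may jump ahead but never go back;
                // post-EuclidV2 it must be exactly sequential.
                let invalid =
                    if is_euclid_v2 { *queue_index != expected } else { *queue_index < expected };
                if invalid {
                    return Err("invalid L1 message queue index");
                }
                expected = *queue_index + 1;
            }
            Tx::L2 => saw_l2 = true,
        }
    }
    Ok(())
}

fn main() {
    // Sequential L1 messages followed by L2 transactions: valid under both rules.
    let ok = [Tx::L1 { queue_index: 0 }, Tx::L1 { queue_index: 1 }, Tx::L2];
    assert!(check_l1_message_order(&ok, false).is_ok());
    assert!(check_l1_message_order(&ok, true).is_ok());

    // A gap (0 -> 2) is tolerated pre-EuclidV2 but rejected once EuclidV2 is active.
    let gap = [Tx::L1 { queue_index: 0 }, Tx::L1 { queue_index: 2 }];
    assert!(check_l1_message_order(&gap, false).is_ok());
    assert!(check_l1_message_order(&gap, true).is_err());
}
```

This matches the `test_validate_l1_messages_*` cases in the diff: skipped indices only become an error from EuclidV2 onward, while decreasing indices and L1 messages placed after L2 transactions are rejected on both sides of the fork.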
diff --git reth/crates/scroll/engine-primitives/Cargo.toml scroll-reth/crates/scroll/engine-primitives/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..bb24a42a903ce53184ed40d4698942b1ec69b50b --- /dev/null +++ scroll-reth/crates/scroll/engine-primitives/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "reth-scroll-engine-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chain-state.workspace = true +reth-chainspec.workspace = true +reth-engine-primitives.workspace = true +reth-payload-builder.workspace = true +reth-payload-primitives = { workspace = true, features = ["scroll-alloy-traits"] } +reth-primitives = { workspace = true, features = ["serde-bincode-compat", "reth-codec"] } +reth-primitives-traits.workspace = true + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true +alloy-rpc-types-engine.workspace = true + +# scroll +reth-scroll-chainspec.workspace = true +reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } +scroll-alloy-rpc-types-engine.workspace = true +scroll-alloy-hardforks.workspace = true + +# misc +serde.workspace = true +sha2 = { workspace = true, default-features = false } + +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["getrandom"] } +arbitrary.workspace = true +eyre.workspace = true +rand.workspace = true + +[features] +default = ["std"] +std = [ + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-rlp/std", + "alloy-rpc-types-engine/std", + "reth-chainspec/std", + "reth-engine-primitives/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "serde/std", + "sha2/std", + "reth-scroll-chainspec/std", + "scroll-alloy-hardforks/std", + "scroll-alloy-rpc-types-engine/std", + "reth-scroll-primitives/std", +]
diff --git reth/crates/scroll/engine-primitives/src/lib.rs scroll-reth/crates/scroll/engine-primitives/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a82ec2581c09f531783073d2bb2682d94082797d --- /dev/null +++ scroll-reth/crates/scroll/engine-primitives/src/lib.rs @@ -0,0 +1,13 @@ +//! The engine primitives for Scroll. + +#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(not(feature = "std"))] +extern crate alloc as std; + +mod payload; +pub use payload::{ + try_into_block, ScrollBuiltPayload, ScrollEngineTypes, ScrollPayloadBuilderAttributes, + ScrollPayloadTypes, +}; + +extern crate alloc;
diff --git reth/crates/scroll/engine-primitives/src/payload/attributes.rs scroll-reth/crates/scroll/engine-primitives/src/payload/attributes.rs new file mode 100644 index 0000000000000000000000000000000000000000..1579845c09c5d71b65d284d7437d514dbf88f1f2 --- /dev/null +++ scroll-reth/crates/scroll/engine-primitives/src/payload/attributes.rs @@ -0,0 +1,219 @@ +//! Payload related types + +use alloc::vec::Vec; +use std::fmt::Debug; + +use alloy_eips::{eip2718::Decodable2718, eip4895::Withdrawals}; +use alloy_primitives::{keccak256, Address, B256}; +use alloy_rlp::Encodable; +use alloy_rpc_types_engine::PayloadId; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_primitives::transaction::WithEncoded; +use reth_scroll_primitives::ScrollTransactionSigned; +use scroll_alloy_rpc_types_engine::{BlockDataHint, ScrollPayloadAttributes}; + +/// Scroll Payload Builder Attributes +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ScrollPayloadBuilderAttributes { + /// Inner ethereum payload builder attributes + pub payload_attributes: EthPayloadBuilderAttributes, + /// `NoTxPool` option for the generated payload + pub no_tx_pool: bool, + /// Decoded transactions and the original EIP-2718 encoded bytes as received in the payload + /// attributes. + pub transactions: Vec<WithEncoded<ScrollTransactionSigned>>, + /// The block data hint, used pre-Euclid by the block builder to derive the correct block + /// hash and post-Euclid by the sequencer to set the difficulty of the block. + pub block_data_hint: BlockDataHint, + /// The gas limit for the generated payload. + pub gas_limit: Option<u64>, +} + +impl PayloadBuilderAttributes for ScrollPayloadBuilderAttributes { + type RpcPayloadAttributes = ScrollPayloadAttributes; + type Error = alloy_rlp::Error; + + fn try_new( + parent: B256, + attributes: ScrollPayloadAttributes, + version: u8, + ) -> Result<Self, Self::Error> { + let id = payload_id_scroll(&parent, &attributes, version); + + let transactions = attributes + .transactions + .unwrap_or_default() + .into_iter() + .map(|data| { + let mut buf = data.as_ref(); + let tx = Decodable2718::decode_2718(&mut buf).map_err(alloy_rlp::Error::from)?; + + if !buf.is_empty() { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(WithEncoded::new(data, tx)) + }) + .collect::<Result<_, _>>()?; + + let payload_attributes = EthPayloadBuilderAttributes { + id, + parent, + timestamp: attributes.payload_attributes.timestamp, + suggested_fee_recipient: attributes.payload_attributes.suggested_fee_recipient, + prev_randao: attributes.payload_attributes.prev_randao, + withdrawals: attributes.payload_attributes.withdrawals.unwrap_or_default().into(), + parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, + }; + + Ok(Self { + payload_attributes, + no_tx_pool: attributes.no_tx_pool, + transactions, + block_data_hint: attributes.block_data_hint, + gas_limit: attributes.gas_limit, + }) + } + + fn payload_id(&self) -> PayloadId { + self.payload_attributes.id + } + + fn parent(&self) -> B256 { + self.payload_attributes.parent + } + + fn timestamp(&self) -> u64 { + self.payload_attributes.timestamp + } + + fn parent_beacon_block_root(&self) -> Option<B256> { + self.payload_attributes.parent_beacon_block_root + } + + fn suggested_fee_recipient(&self) -> Address { + self.payload_attributes.suggested_fee_recipient + } + + fn prev_randao(&self) -> B256 { + self.payload_attributes.prev_randao + } + + fn withdrawals(&self) -> 
&Withdrawals { + &self.payload_attributes.withdrawals + } +} + +/// Generates the payload id for the configured payload from the [`ScrollPayloadAttributes`]. +/// +/// Returns an 8-byte identifier by hashing the payload components with sha256 hash. +pub(crate) fn payload_id_scroll( + parent: &B256, + attributes: &ScrollPayloadAttributes, + payload_version: u8, +) -> PayloadId { + use sha2::Digest; + let mut hasher = sha2::Sha256::new(); + hasher.update(parent.as_slice()); + hasher.update(&attributes.payload_attributes.timestamp.to_be_bytes()[..]); + hasher.update(attributes.payload_attributes.prev_randao.as_slice()); + hasher.update(attributes.payload_attributes.suggested_fee_recipient.as_slice()); + if let Some(withdrawals) = &attributes.payload_attributes.withdrawals { + let mut buf = Vec::new(); + withdrawals.encode(&mut buf); + hasher.update(buf); + } + + if let Some(parent_beacon_block) = attributes.payload_attributes.parent_beacon_block_root { + hasher.update(parent_beacon_block); + } + + let no_tx_pool = attributes.no_tx_pool; + if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) { + hasher.update([no_tx_pool as u8]); + let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default(); + hasher.update(&txs_len.to_be_bytes()[..]); + if let Some(txs) = &attributes.transactions { + for tx in txs { + // we have to just hash the bytes here because otherwise we would need to decode + // the transactions here which really isn't ideal + let tx_hash = keccak256(tx); + // maybe we can try just taking the hash and not decoding + hasher.update(tx_hash) + } + } + } + + if let Some(extra_data) = &attributes.block_data_hint.extra_data { + hasher.update(extra_data); + } + if let Some(state_root) = &attributes.block_data_hint.state_root { + hasher.update(state_root.0); + } + if let Some(coinbase) = &attributes.block_data_hint.coinbase { + hasher.update(coinbase); + } + if let Some(nonce) = &attributes.block_data_hint.nonce { + hasher.update(nonce.to_be_bytes()); + } + if let Some(difficulty) = &attributes.block_data_hint.difficulty { + hasher.update(difficulty.to_be_bytes::<32>()); + } + if let Some(gas_limit) = attributes.gas_limit { + hasher.update(gas_limit.to_be_bytes()); + } + + let mut out = hasher.finalize(); + out[0] = payload_version; + PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) +} + +impl From<EthPayloadBuilderAttributes> for ScrollPayloadBuilderAttributes { + fn from(value: EthPayloadBuilderAttributes) -> Self { + Self { payload_attributes: value, ..Default::default() } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::str::FromStr; + use alloy_primitives::{address, b256, bytes, FixedBytes, U256}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_payload_primitives::EngineApiMessageVersion; + + #[test] + fn test_payload_id() { + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x036369370c155d4c").unwrap().into()); + let attrs = ScrollPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(), + }, + transactions: 
Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: false, + block_data_hint: BlockDataHint{ + extra_data: Some(bytes!("476574682f76312e302e302f6c696e75782f676f312e342e32")), + state_root: Some(b256!("0x000000000000000000000000000000000000000000000000000000000000dead")), + coinbase: Some(address!("0x000000000000000000000000000000000000dead")), + nonce: Some(u64::MAX), + difficulty: Some(U256::from(10)) + }, + gas_limit: Some(10_000_000), + }; + + assert_eq!( + expected, + payload_id_scroll( + &b256!("3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V3 as u8 + ) + ); + } +}
diff --git reth/crates/scroll/engine-primitives/src/payload/built.rs scroll-reth/crates/scroll/engine-primitives/src/payload/built.rs new file mode 100644 index 0000000000000000000000000000000000000000..cea13302aa1c9cfacba619cebd2b2f90613e6593 --- /dev/null +++ scroll-reth/crates/scroll/engine-primitives/src/payload/built.rs @@ -0,0 +1,135 @@ +//! Outcome of a Scroll block building task with payload attributes provided via the Engine API. + +use core::iter; +use std::sync::Arc; + +use alloy_eips::eip7685::Requests; +use alloy_primitives::U256; +use alloy_rpc_types_engine::{ + BlobsBundleV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, + ExecutionPayloadEnvelopeV4, ExecutionPayloadFieldV2, ExecutionPayloadV1, ExecutionPayloadV3, + PayloadId, +}; +use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_payload_primitives::BuiltPayload; +use reth_primitives_traits::SealedBlock; +use reth_scroll_primitives::{ScrollBlock, ScrollPrimitives}; + +/// Contains the built payload. +#[derive(Debug, Clone, Default)] +pub struct ScrollBuiltPayload { + /// Identifier of the payload + pub(crate) id: PayloadId, + /// Sealed block + pub(crate) block: Arc<SealedBlock<ScrollBlock>>, + /// Block execution data for the payload + pub(crate) executed_block: Option<ExecutedBlockWithTrieUpdates<ScrollPrimitives>>, + /// The fees of the block + pub(crate) fees: U256, +} + +impl ScrollBuiltPayload { + /// Initializes the payload with the given initial block. + pub const fn new( + id: PayloadId, + block: Arc<SealedBlock<ScrollBlock>>, + executed_block: Option<ExecutedBlockWithTrieUpdates<ScrollPrimitives>>, + fees: U256, + ) -> Self { + Self { id, block, executed_block, fees } + } + + /// Returns the identifier of the payload. + pub const fn id(&self) -> PayloadId { + self.id + } + + /// Returns the built block(sealed) + pub fn block(&self) -> &SealedBlock<ScrollBlock> { + &self.block + } + + /// Fees of the block + pub const fn fees(&self) -> U256 { + self.fees + } + + /// Converts the value into [`SealedBlock`]. + pub fn into_sealed_block(self) -> SealedBlock<ScrollBlock> { + Arc::unwrap_or_clone(self.block) + } +} + +impl BuiltPayload for ScrollBuiltPayload { + type Primitives = ScrollPrimitives; + + fn block(&self) -> &SealedBlock<ScrollBlock> { + self.block() + } + + fn fees(&self) -> U256 { + self.fees + } + + fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<Self::Primitives>> { + self.executed_block.clone() + } + + fn requests(&self) -> Option<Requests> { + None + } +} + +// V1 engine_getPayloadV1 response +impl From<ScrollBuiltPayload> for ExecutionPayloadV1 { + fn from(value: ScrollBuiltPayload) -> Self { + Self::from_block_unchecked( + value.block().hash(), + &Arc::unwrap_or_clone(value.block).into_block(), + ) + } +} + +// V2 engine_getPayloadV2 response +impl From<ScrollBuiltPayload> for ExecutionPayloadEnvelopeV2 { + fn from(value: ScrollBuiltPayload) -> Self { + let ScrollBuiltPayload { block, fees, .. } = value; + + Self { + block_value: fees, + execution_payload: ExecutionPayloadFieldV2::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block).into_block(), + ), + } + } +} + +impl From<ScrollBuiltPayload> for ExecutionPayloadEnvelopeV3 { + fn from(value: ScrollBuiltPayload) -> Self { + let ScrollBuiltPayload { block, fees, .. 
} = value; + + Self { + execution_payload: ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block).into_block(), + ), + block_value: fees, + // From the engine API spec: + // + // > Client software **MAY** use any heuristics to decide whether to set + // `shouldOverrideBuilder` flag or not. If client software does not implement any + // heuristic this flag **SHOULD** be set to `false`. + // + // Spec: + // <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#specification-2> + should_override_builder: false, + blobs_bundle: BlobsBundleV1::new(iter::empty()), + } + } +} +impl From<ScrollBuiltPayload> for ExecutionPayloadEnvelopeV4 { + fn from(value: ScrollBuiltPayload) -> Self { + Self { envelope_inner: value.into(), execution_requests: Default::default() } + } +}
diff --git reth/crates/scroll/engine-primitives/src/payload/mod.rs scroll-reth/crates/scroll/engine-primitives/src/payload/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7fd48170173de33c7b2cab42bad7690f31c62c2 --- /dev/null +++ scroll-reth/crates/scroll/engine-primitives/src/payload/mod.rs @@ -0,0 +1,331 @@ +//! Engine API Payload types. + +mod attributes; +pub use attributes::ScrollPayloadBuilderAttributes; + +mod built; +pub use built::ScrollBuiltPayload; + +use alloc::{sync::Arc, vec::Vec}; +use core::marker::PhantomData; + +use alloy_consensus::{proofs, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::eip2718::Decodable2718; +use alloy_primitives::U256; +use alloy_rlp::BufMut; +use alloy_rpc_types_engine::{ + ExecutionData, ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, + ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, + PayloadError, +}; +use reth_engine_primitives::EngineTypes; +use reth_payload_primitives::{BuiltPayload, PayloadTypes}; +use reth_primitives::{Block, BlockBody, Header}; +use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_scroll_primitives::ScrollBlock; +use scroll_alloy_hardforks::ScrollHardforks; +use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; + +/// The types used in the default Scroll beacon consensus engine. +#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] +#[non_exhaustive] +pub struct ScrollEngineTypes<T: PayloadTypes = ScrollPayloadTypes> { + _marker: PhantomData<T>, +} + +impl< + T: PayloadTypes< + ExecutionData = ExecutionData, + BuiltPayload: BuiltPayload<Primitives: NodePrimitives<Block = ScrollBlock>>, + >, + > PayloadTypes for ScrollEngineTypes<T> +{ + type ExecutionData = T::ExecutionData; + type BuiltPayload = T::BuiltPayload; + type PayloadAttributes = T::PayloadAttributes; + type PayloadBuilderAttributes = T::PayloadBuilderAttributes; + + fn block_to_payload( + block: SealedBlock< + <<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block, + >, + ) -> ExecutionData { + let (payload, sidecar) = + ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } + } +} + +impl<T> EngineTypes for ScrollEngineTypes<T> +where + T: PayloadTypes<ExecutionData = ExecutionData>, + T::BuiltPayload: BuiltPayload<Primitives: NodePrimitives<Block = ScrollBlock>> + + TryInto<ExecutionPayloadV1> + + TryInto<ExecutionPayloadEnvelopeV2> + + TryInto<ExecutionPayloadEnvelopeV3> + + TryInto<ExecutionPayloadEnvelopeV4>, +{ + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV5 = ExecutionPayloadEnvelopeV4; +} + +/// A default payload type for [`ScrollEngineTypes`] +#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] +#[non_exhaustive] +pub struct ScrollPayloadTypes; + +impl PayloadTypes for ScrollPayloadTypes { + type ExecutionData = ExecutionData; + type BuiltPayload = ScrollBuiltPayload; + type PayloadAttributes = ScrollPayloadAttributes; + type PayloadBuilderAttributes = ScrollPayloadBuilderAttributes; + + fn block_to_payload( + block: SealedBlock< + <<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block, + >, + ) -> Self::ExecutionData { + let (payload, sidecar) = + 
ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block()); + ExecutionData { payload, sidecar } + } +} + +/// Tries to create a new unsealed block from the given payload, sidecar and chain specification. +/// Sets the base fee of the block to `None` before the Curie hardfork. +/// Scroll implementation of the [`ExecutionPayload::try_into_block`], which will fail with +/// [`PayloadError::ExtraData`] due to the Scroll blocks containing extra data for the Clique +/// consensus. +pub fn try_into_block<T: Decodable2718, CS: ScrollHardforks>( + value: ExecutionData, + chainspec: Arc<CS>, +) -> Result<Block<T>, PayloadError> { + let mut block = match value.payload { + ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload, chainspec)?, + ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload, chainspec)?, + ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload, chainspec)?, + }; + + block.header.parent_beacon_block_root = value.sidecar.parent_beacon_block_root(); + block.header.requests_hash = value.sidecar.requests_hash(); + + Ok(block) +} + +/// Tries to convert an [`ExecutionPayloadV1`] to [`Block`]. +fn try_payload_v1_to_block<T: Decodable2718, CS: ScrollHardforks>( + payload: ExecutionPayloadV1, + chainspec: CS, +) -> Result<Block<T>, PayloadError> { + // WARNING: It’s allowed for a base fee in EIP1559 to increase unbounded. We assume that + // it will fit in an u64. This is not always necessarily true, although it is extremely + // unlikely not to be the case, a u64 maximum would have 2^64 which equates to 18 ETH per + // gas. + let basefee = chainspec + .is_curie_active_at_block(payload.block_number) + .then_some(payload.base_fee_per_gas) + .map(|b| b.try_into()) + .transpose() + .map_err(|_| PayloadError::BaseFee(payload.base_fee_per_gas))?; + + let transactions = payload + .transactions + .iter() + .map(|tx| { + let mut buf = tx.as_ref(); + + let tx = T::decode_2718(&mut buf).map_err(alloy_rlp::Error::from)?; + + if !buf.is_empty() { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(tx) + }) + .collect::<Result<Vec<_>, _>>()?; + + // Reuse the encoded bytes for root calculation + let transactions_root = + proofs::ordered_trie_root_with_encoder(&payload.transactions, |item, buf| { + buf.put_slice(item) + }); + + let header = Header { + parent_hash: payload.parent_hash, + beneficiary: payload.fee_recipient, + state_root: payload.state_root, + transactions_root, + receipts_root: payload.receipts_root, + withdrawals_root: None, + logs_bloom: payload.logs_bloom, + number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + mix_hash: payload.prev_randao, + base_fee_per_gas: basefee, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + extra_data: payload.extra_data, + // Defaults + ommers_hash: EMPTY_OMMER_ROOT_HASH, + difficulty: U256::ONE, + nonce: Default::default(), + }; + + Ok(Block { header, body: BlockBody { transactions, ..Default::default() } }) +} + +/// Tries to convert an [`ExecutionPayloadV2`] to [`Block`]. 
+fn try_payload_v2_to_block<T: Decodable2718, CS: ScrollHardforks>( + payload: ExecutionPayloadV2, + chainspec: CS, +) -> Result<Block<T>, PayloadError> { + // this performs the same conversion as the underlying V1 payload, but calculates the + // withdrawals root and adds withdrawals + let mut base_sealed_block = try_payload_v1_to_block(payload.payload_inner, chainspec)?; + let withdrawals_root = proofs::calculate_withdrawals_root(&payload.withdrawals); + base_sealed_block.body.withdrawals = Some(payload.withdrawals.into()); + base_sealed_block.header.withdrawals_root = Some(withdrawals_root); + Ok(base_sealed_block) +} + +/// Tries to convert an [`ExecutionPayloadV3`] to [`Block`]. +fn try_payload_v3_to_block<T: Decodable2718, CS: ScrollHardforks>( + payload: ExecutionPayloadV3, + chainspec: CS, +) -> Result<Block<T>, PayloadError> { + // this performs the same conversion as the underlying V2 payload, but inserts the blob gas + // used and excess blob gas + let mut base_block = try_payload_v2_to_block(payload.payload_inner, chainspec)?; + + base_block.header.blob_gas_used = Some(payload.blob_gas_used); + base_block.header.excess_blob_gas = Some(payload.excess_blob_gas); + + Ok(base_block) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{Address, Bloom, B256, U256}; + use alloy_rpc_types_engine::ExecutionPayloadV1; + use arbitrary::{Arbitrary, Unstructured}; + use rand::Rng; + use reth_scroll_chainspec::SCROLL_MAINNET; + use reth_scroll_primitives::ScrollTransactionSigned; + + #[test] + fn test_can_convert_execution_v1_payload_into_block() -> eyre::Result<()> { + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let mut u = Unstructured::new(&bytes); + + let mut extra_data = [0u8; 64]; + rand::rng().fill(extra_data.as_mut_slice()); + + let execution_payload = ExecutionPayload::V1(ExecutionPayloadV1 { + parent_hash: B256::random(), + fee_recipient: Address::random(), + state_root: B256::random(), + receipts_root: B256::random(), + logs_bloom: Bloom::random(), + prev_randao: B256::random(), + block_number: u64::arbitrary(&mut u)?, + gas_limit: u64::arbitrary(&mut u)?, + gas_used: u64::arbitrary(&mut u)?, + timestamp: u64::arbitrary(&mut u)?, + extra_data: extra_data.into(), + base_fee_per_gas: U256::from(u64::arbitrary(&mut u)?), + block_hash: B256::random(), + transactions: vec![], + }); + let execution_data = ExecutionData::new(execution_payload, Default::default()); + + let _: Block<ScrollTransactionSigned> = + try_into_block(execution_data, SCROLL_MAINNET.clone())?; + + Ok(()) + } + + #[test] + fn test_can_convert_execution_v2_payload_into_block() -> eyre::Result<()> { + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let mut u = Unstructured::new(&bytes); + + let mut extra_data = [0u8; 64]; + rand::rng().fill(extra_data.as_mut_slice()); + + let execution_payload = ExecutionPayload::V2(ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: B256::random(), + fee_recipient: Address::random(), + state_root: B256::random(), + receipts_root: B256::random(), + logs_bloom: Bloom::random(), + prev_randao: B256::random(), + block_number: u64::arbitrary(&mut u)?, + gas_limit: u64::arbitrary(&mut u)?, + gas_used: u64::arbitrary(&mut u)?, + timestamp: u64::arbitrary(&mut u)?, + extra_data: extra_data.into(), + base_fee_per_gas: U256::from(u64::arbitrary(&mut u)?), + block_hash: B256::random(), + transactions: vec![], + }, + withdrawals: vec![], + }); + let execution_data = 
ExecutionData::new(execution_payload, Default::default()); + + let _: Block<ScrollTransactionSigned> = + try_into_block(execution_data, SCROLL_MAINNET.clone())?; + + Ok(()) + } + + #[test] + fn test_can_convert_execution_v3_payload_into_block() -> eyre::Result<()> { + let mut bytes = [0u8; 1024]; + rand::rng().fill(bytes.as_mut_slice()); + let mut u = Unstructured::new(&bytes); + + let mut extra_data = [0u8; 64]; + rand::rng().fill(extra_data.as_mut_slice()); + + let execution_payload = ExecutionPayload::V3(ExecutionPayloadV3 { + payload_inner: ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: B256::random(), + fee_recipient: Address::random(), + state_root: B256::random(), + receipts_root: B256::random(), + logs_bloom: Bloom::random(), + prev_randao: B256::random(), + block_number: u64::arbitrary(&mut u)?, + gas_limit: u64::arbitrary(&mut u)?, + gas_used: u64::arbitrary(&mut u)?, + timestamp: u64::arbitrary(&mut u)?, + extra_data: extra_data.into(), + base_fee_per_gas: U256::from(u64::arbitrary(&mut u)?), + block_hash: B256::random(), + transactions: vec![], + }, + withdrawals: vec![], + }, + blob_gas_used: 0, + excess_blob_gas: 0, + }); + let execution_data = ExecutionData::new(execution_payload, Default::default()); + + let _: Block<ScrollTransactionSigned> = + try_into_block(execution_data, SCROLL_MAINNET.clone())?; + + Ok(()) + } +}
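One detail worth calling out in `try_payload_v1_to_block` above is the base fee handling: the Engine API payload always carries a `base_fee_per_gas`, but Scroll headers only gained the field with Curie, so the conversion keeps it only when Curie is active and additionally requires it to fit in a `u64`. A dependency-free sketch of that gating follows; the hardfork check is replaced by a plain `bool`, and `u128` stands in for the payload's `U256`.

```rust
/// Sketch of the base fee gating in `try_payload_v1_to_block`: before Curie
/// the header carries no base fee, after Curie the payload's value must fit
/// into the header's u64 field.
fn gated_base_fee(curie_active: bool, payload_base_fee: u128) -> Result<Option<u64>, &'static str> {
    curie_active
        .then_some(payload_base_fee)
        .map(u64::try_from)
        .transpose()
        .map_err(|_| "base fee does not fit in a u64")
}

fn main() {
    // Pre-Curie blocks are built without a base fee, whatever the payload says.
    assert_eq!(gated_base_fee(false, 1_000_000_000), Ok(None));
    // Post-Curie the value is carried over as-is.
    assert_eq!(gated_base_fee(true, 1_000_000_000), Ok(Some(1_000_000_000)));
    // Values that overflow a u64 are rejected, mirroring `PayloadError::BaseFee`.
    assert!(gated_base_fee(true, u128::from(u64::MAX) + 1).is_err());
}
```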
diff --git reth/crates/scroll/evm/Cargo.toml scroll-reth/crates/scroll/evm/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e1cd82d674b5525ca5875f6f7589a5ace6ddd959 --- /dev/null +++ scroll-reth/crates/scroll/evm/Cargo.toml @@ -0,0 +1,79 @@ +[package] +name = "reth-scroll-evm" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-evm = { workspace = true, features = ["scroll-alloy-traits"] } +reth-execution-types.workspace = true +reth-primitives = { workspace = true, features = ["serde-bincode-compat"] } +reth-primitives-traits.workspace = true +reth-storage-api.workspace = true + +# revm +revm = { workspace = true, features = ["optional_no_base_fee"] } +revm-primitives.workspace = true +revm-scroll.workspace = true + +# scroll +reth-scroll-chainspec.workspace = true +reth-scroll-forks.workspace = true +reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat"], default-features = false } + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-evm.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true + +# scroll +scroll-alloy-consensus.workspace = true +scroll-alloy-evm.workspace = true +scroll-alloy-hardforks.workspace = true + +# misc +derive_more.workspace = true +thiserror.workspace = true +tracing.workspace = true + +[dev-dependencies] +alloy-primitives = { workspace = true, features = ["getrandom", "rand"] } +eyre.workspace = true + +[features] +std = [ + "scroll-alloy-consensus/std", + "scroll-alloy-evm/std", + "alloy-consensus/std", + "alloy-evm/std", + "alloy-eips/std", + "alloy-primitives/std", + "derive_more/std", + "reth-chainspec/std", + "reth-evm/std", + "reth-execution-types/std", + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-scroll-chainspec/std", + "reth-scroll-forks/std", + "reth-scroll-primitives/std", + "revm-primitives/std", + "revm-scroll/std", + "revm/std", + "thiserror/std", + "tracing/std", + "scroll-alloy-hardforks/std", + "reth-storage-api/std", + "alloy-rpc-types-engine/std", +]
diff --git reth/crates/scroll/evm/src/base_fee.rs scroll-reth/crates/scroll/evm/src/base_fee.rs new file mode 100644 index 0000000000000000000000000000000000000000..411ac75bbab42cd2c0a10aa6ce2e894054657c9a --- /dev/null +++ scroll-reth/crates/scroll/evm/src/base_fee.rs @@ -0,0 +1,268 @@ +use alloy_consensus::BlockHeader; +use alloy_eips::calc_next_block_base_fee; +use alloy_primitives::U256; +use reth_chainspec::EthChainSpec; +use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; +use reth_storage_api::{BaseFeeProvider, StorageProvider}; +use scroll_alloy_evm::gas_price_oracle::{GPO_L1_BASE_FEE_SLOT, L1_GAS_PRICE_ORACLE_ADDRESS}; +use scroll_alloy_hardforks::ScrollHardforks; + +/// Protocol-enforced maximum L2 base fee. +pub const MAX_L2_BASE_FEE: u64 = 10_000_000_000; + +/// The base fee overhead slot. +const L2_BASE_FEE_OVERHEAD_SLOT: U256 = U256::from_limbs([101, 0, 0, 0]); + +/// The default base fee overhead, in case the L2 system contract isn't deployed or +/// initialized. +pub const DEFAULT_BASE_FEE_OVERHEAD: U256 = U256::from_limbs([15_680_000, 0, 0, 0]); + +/// The base fee scalar slot. +const L2_BASE_FEE_SCALAR_SLOT: U256 = U256::from_limbs([102, 0, 0, 0]); + +/// The default scalar applied on the L1 base fee, in case the L2 system contract isn't deployed or +/// initialized. +pub const DEFAULT_BASE_FEE_SCALAR: U256 = U256::from_limbs([34_000_000_000_000, 0, 0, 0]); + +/// The precision of the L1 base fee. +pub const L1_BASE_FEE_PRECISION: U256 = U256::from_limbs([1_000_000_000_000_000_000, 0, 0, 0]); + +/// The initial base fee. +const INITIAL_BASE_FEE: u64 = 10_000_000; + +/// The Scroll base fee provider implementation. +#[derive(Clone, Debug, Default)] +pub struct ScrollBaseFeeProvider<ChainSpec>(ChainSpec); + +impl<ChainSpec> ScrollBaseFeeProvider<ChainSpec> { + /// Returns a new instance of a [`ScrollBaseFeeProvider`]. + pub const fn new(chain_spec: ChainSpec) -> Self { + Self(chain_spec) + } +} + +impl<ChainSpec, P> BaseFeeProvider<P> for ScrollBaseFeeProvider<ChainSpec> +where + ChainSpec: EthChainSpec + ScrollHardforks + ChainConfig<Config = ScrollChainConfig>, + P: StorageProvider, +{ + fn next_block_base_fee<H: BlockHeader>( + &self, + provider: &mut P, + parent_header: &H, + ts: u64, + ) -> Result<u64, P::Error> { + let chain_spec = &self.0; + + // Return early if Curie isn't active. This branch will be taken by the + // `ScrollPayloadBuilder` when executing `PayloadAttributes` that were derived from the L1 + // (during the L1 consolidation phase of the Rollup Node). + if !chain_spec.is_curie_active_at_block(parent_header.number() + 1) { + return Ok(0); + } + + // load l2 system config contract into cache. + let system_config_contract_address = + chain_spec.chain_config().l1_config.l2_system_config_address; + // query scalar and overhead. + let (mut scalar, mut overhead) = ( + provider.storage(system_config_contract_address, L2_BASE_FEE_SCALAR_SLOT)?, + provider.storage(system_config_contract_address, L2_BASE_FEE_OVERHEAD_SLOT)?, + ); + // if any value is 0, use the default values. 
+ (scalar, overhead) = ( + if scalar == U256::ZERO { DEFAULT_BASE_FEE_SCALAR } else { scalar }, + if overhead == U256::ZERO { DEFAULT_BASE_FEE_OVERHEAD } else { overhead }, + ); + + let mut base_fee = if chain_spec.is_feynman_active_at_timestamp(ts) { + feynman_base_fee(chain_spec, parent_header, ts, overhead.saturating_to()) + } else { + let parent_l1_base_fee = + provider.storage(L1_GAS_PRICE_ORACLE_ADDRESS, GPO_L1_BASE_FEE_SLOT)?; + pre_feynman_base_fee(parent_l1_base_fee, scalar, overhead).saturating_to() + }; + + if base_fee > MAX_L2_BASE_FEE { + base_fee = MAX_L2_BASE_FEE; + } + + Ok(base_fee) + } +} + +/// Returns the Feynman base fee. +fn feynman_base_fee<H: BlockHeader, ChainSpec: EthChainSpec + ScrollHardforks>( + chainspec: ChainSpec, + parent_header: H, + ts: u64, + overhead: u64, +) -> u64 { + let eip_1559_base_fee = if chainspec.is_feynman_active_at_timestamp(parent_header.timestamp()) { + // extract the eip 1559 base fee from parent header by subtracting overhead from it. + let parent_eip_1559_base_fee = + parent_header.base_fee_per_gas().expect("Feynman active").saturating_sub(overhead); + let base_fee_params = chainspec.base_fee_params_at_timestamp(ts); + calc_next_block_base_fee( + parent_header.gas_used(), + parent_header.gas_limit(), + parent_eip_1559_base_fee, + base_fee_params, + ) + } else { + // this is the first Feynman block. + // if the parent has a base fee, return it. + if let Some(base_fee) = parent_header.base_fee_per_gas() { + base_fee + } else { + INITIAL_BASE_FEE + } + }; + + eip_1559_base_fee.saturating_add(overhead) +} + +/// Returns the pre Feynman base fee. +fn pre_feynman_base_fee(parent_l1_base_fee: U256, scalar: U256, overhead: U256) -> U256 { + // l1 base fee * scalar / precision + overhead. + parent_l1_base_fee * scalar / L1_BASE_FEE_PRECISION + overhead +} + +#[cfg(test)] +mod tests { + use super::*; + use std::boxed::Box; + + use alloy_consensus::BlockHeader; + use reth_scroll_chainspec::SCROLL_MAINNET; + use revm::database::{states::plain_account::PlainStorage, EmptyDB, State}; + use scroll_alloy_hardforks::ScrollHardfork; + + const CURIE_PARAMS_TEST_CASES: [(u64, u64); 8] = [ + (0u64, 15680000u64), + (1000000000, 15714000), + (2000000000, 15748000), + (100000000000, 19080000), + (111111111111, 19457777), + (2164000000000, 89256000), + (644149677419355, 10000000000), + (0x1c3c0f442u64, 15937691), + ]; + + const OVERWRITTEN_PARAMS_TEST_CASES: [(u64, u64); 7] = [ + (0, 1), + (1000000000, 1), + (2000000000, 1), + (100000000000, 2), + (111111111111, 2), + (2164000000000, 22), + (644149677419355, 6442), + ]; + + const EIP_1559_TEST_CASES: [(u64, u64, u64, u64); 3] = [ + (1000000000, 20000000, 10000000, 1000000000), // usage == target + (1000000001, 20000000, 9000000, 987500001), // usage below target + (1000000001, 20000000, 11000000, 1012500001), // usage above target + ]; + + const CURIE_TIMESTAMP: u64 = 1719994280; + const CURIE_BLOCK: u64 = 7096836; + + #[test] + fn test_should_return_correct_base_fee() -> Result<(), Box<dyn core::error::Error>> { + // init the state db. + let db = EmptyDB::new(); + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + // init the provider and parent header. 
+ let base_fee_provider = ScrollBaseFeeProvider::new(SCROLL_MAINNET.clone()); + let parent_header = alloy_consensus::Header { + timestamp: CURIE_TIMESTAMP, + number: CURIE_BLOCK, + ..Default::default() + }; + let parent_header_ts = parent_header.timestamp(); + + // helper closure to insert the l1 base fee in state. + let insert_l1_base_fee = |state: &mut State<EmptyDB>, l1_base_fee: u64| { + let oracle_storage_pre_fork = + PlainStorage::from_iter([(GPO_L1_BASE_FEE_SLOT, U256::from(l1_base_fee))]); + state.insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + Default::default(), + oracle_storage_pre_fork, + ); + }; + + // for each test case, insert the l1 base fee and check the expected value matches. + for (l1_base_fee, expected_base_fee) in CURIE_PARAMS_TEST_CASES { + insert_l1_base_fee(&mut state, l1_base_fee); + + // fetch base fee from db. + let base_fee = base_fee_provider.next_block_base_fee( + &mut state, + &parent_header, + parent_header_ts + 1, + )?; + assert_eq!(base_fee, expected_base_fee); + } + + // insert the base fee params. + let system_contract_storage = PlainStorage::from_iter([ + (L2_BASE_FEE_SCALAR_SLOT, U256::from(10000000)), + (L2_BASE_FEE_OVERHEAD_SLOT, U256::ONE), + ]); + state.insert_account_with_storage( + SCROLL_MAINNET.config.l1_config.l2_system_config_address, + Default::default(), + system_contract_storage, + ); + + // for each test case, insert the l1 base fee and check the expected value matches. + for (l1_base_fee, expected_base_fee) in OVERWRITTEN_PARAMS_TEST_CASES { + insert_l1_base_fee(&mut state, l1_base_fee); + + // fetch base fee from db. + let base_fee = base_fee_provider.next_block_base_fee( + &mut state, + &parent_header, + parent_header_ts + 1, + )?; + assert_eq!(base_fee, expected_base_fee); + } + + // update the parent header used to activate Feynman. + let feynman_fork_ts = SCROLL_MAINNET + .hardforks + .get(ScrollHardfork::Feynman) + .unwrap() + .as_timestamp() + .expect("Feynman is timestamp based forked."); + let mut parent_header = alloy_consensus::Header { + timestamp: feynman_fork_ts + 1, + number: CURIE_BLOCK + 1, + ..Default::default() + }; + let parent_header_ts = parent_header.timestamp(); + + // for each test case, update the parent header fields and check the expected value matches. + for (parent_base_fee, parent_gas_limit, parent_gas_used, expected_base_fee) in + EIP_1559_TEST_CASES + { + parent_header.base_fee_per_gas = Some(parent_base_fee); + parent_header.gas_limit = parent_gas_limit; + parent_header.gas_used = parent_gas_used; + + // fetch base fee from db. + let base_fee = base_fee_provider.next_block_base_fee( + &mut state, + &parent_header, + parent_header_ts + 1, + )?; + assert_eq!(base_fee, expected_base_fee); + } + + Ok(()) + } +}
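The pre-Feynman branch above is a simple affine scaling of the parent L1 base fee, with `next_block_base_fee` capping the result at `MAX_L2_BASE_FEE`. A minimal standalone sketch of that arithmetic (u128 in place of `U256`, constants mirroring the defaults above, cap folded in for brevity), checked against two of the `CURIE_PARAMS_TEST_CASES` vectors:

```rust
const SCALAR: u128 = 34_000_000_000_000;            // DEFAULT_BASE_FEE_SCALAR
const OVERHEAD: u128 = 15_680_000;                  // DEFAULT_BASE_FEE_OVERHEAD
const PRECISION: u128 = 1_000_000_000_000_000_000;  // L1_BASE_FEE_PRECISION
const MAX_L2_BASE_FEE: u128 = 10_000_000_000;

/// l1 base fee * scalar / precision + overhead, capped at the protocol max.
fn pre_feynman_base_fee(parent_l1_base_fee: u128) -> u128 {
    (parent_l1_base_fee * SCALAR / PRECISION + OVERHEAD).min(MAX_L2_BASE_FEE)
}

fn main() {
    assert_eq!(pre_feynman_base_fee(1_000_000_000), 15_714_000);
    assert_eq!(pre_feynman_base_fee(644_149_677_419_355), 10_000_000_000); // capped
}
```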
diff --git reth/crates/scroll/evm/src/build.rs scroll-reth/crates/scroll/evm/src/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad39ff5a4ec88bc5b6a10f0d76fceb1c90a2273c --- /dev/null +++ scroll-reth/crates/scroll/evm/src/build.rs @@ -0,0 +1,95 @@ +use alloc::sync::Arc; +use alloy_consensus::{proofs, BlockBody, Header, TxReceipt, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::merge::BEACON_NONCE; +use alloy_evm::block::{BlockExecutionError, BlockExecutorFactory}; +use alloy_primitives::{logs_bloom, Address}; +use reth_evm::execute::{BlockAssembler, BlockAssemblerInput}; +use reth_execution_types::BlockExecutionResult; +use reth_primitives_traits::SignedTransaction; +use reth_scroll_primitives::ScrollReceipt; +use revm::context::Block; +use scroll_alloy_evm::ScrollBlockExecutionCtx; +use scroll_alloy_hardforks::ScrollHardforks; + +/// Block builder for Scroll. +#[derive(Debug)] +pub struct ScrollBlockAssembler<ChainSpec> { + chain_spec: Arc<ChainSpec>, +} + +impl<ChainSpec> ScrollBlockAssembler<ChainSpec> { + /// Creates a new [`ScrollBlockAssembler`]. + pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { + Self { chain_spec } + } +} + +impl<ChainSpec> Clone for ScrollBlockAssembler<ChainSpec> { + fn clone(&self) -> Self { + Self { chain_spec: self.chain_spec.clone() } + } +} + +impl<F, ChainSpec> BlockAssembler<F> for ScrollBlockAssembler<ChainSpec> +where + ChainSpec: ScrollHardforks, + F: for<'a> BlockExecutorFactory< + ExecutionCtx<'a> = ScrollBlockExecutionCtx, + Transaction: SignedTransaction, + Receipt = ScrollReceipt, + >, +{ + type Block = alloy_consensus::Block<F::Transaction>; + + fn assemble_block( + &self, + input: BlockAssemblerInput<'_, '_, F>, + ) -> Result<Self::Block, BlockExecutionError> { + let BlockAssemblerInput { + evm_env, + execution_ctx: ctx, + transactions, + output: BlockExecutionResult { receipts, gas_used, .. }, + state_root, + .. + } = input; + + let timestamp = evm_env.block_env.timestamp(); + + let transactions_root = proofs::calculate_transaction_root(&transactions); + let receipts_root = ScrollReceipt::calculate_receipt_root_no_memo(receipts); + let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| r.logs())); + + let header = Header { + parent_hash: ctx.parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: Address::ZERO, + state_root, + transactions_root, + receipts_root, + withdrawals_root: None, + logs_bloom, + timestamp: timestamp.to(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), + nonce: BEACON_NONCE.into(), + base_fee_per_gas: self + .chain_spec + .is_curie_active_at_block(evm_env.block_env.number().to()) + .then_some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), + gas_used: *gas_used, + extra_data: Default::default(), + parent_beacon_block_root: None, + blob_gas_used: None, + excess_blob_gas: None, + requests_hash: None, + }; + + Ok(alloy_consensus::Block::new( + header, + BlockBody { transactions, ommers: Default::default(), withdrawals: None }, + )) + } +}
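One Scroll-specific detail in the assembler above: `base_fee_per_gas` is only populated once Curie is active at the block's height, so pre-Curie headers are assembled without a base fee. A tiny sketch of that gate (the assembler relies on `bool::then_some` for this):

```rust
/// Pre-Curie Scroll headers carry no base fee; from Curie onwards the header
/// takes the block env's base fee.
fn header_base_fee(curie_active: bool, block_env_basefee: u64) -> Option<u64> {
    curie_active.then_some(block_env_basefee)
}

fn main() {
    assert_eq!(header_base_fee(false, 155_157_341), None);
    assert_eq!(header_base_fee(true, 155_157_341), Some(155_157_341));
}
```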
diff --git reth/crates/scroll/evm/src/config.rs scroll-reth/crates/scroll/evm/src/config.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab603bb8d6820ccf6dae558eaf4dc57572036f9e --- /dev/null +++ scroll-reth/crates/scroll/evm/src/config.rs @@ -0,0 +1,378 @@ +use crate::{build::ScrollBlockAssembler, ScrollEvmConfig, ScrollNextBlockEnvAttributes}; +use alloc::sync::Arc; + +use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{eip2718::WithEncoded, Decodable2718}; +use alloy_evm::{FromRecoveredTx, FromTxWithEncoded}; +use alloy_primitives::B256; +use alloy_rpc_types_engine::ExecutionData; +use core::convert::Infallible; +use reth_chainspec::EthChainSpec; +use reth_evm::{ + ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, ExecutableTxIterator, ExecutionCtxFor, +}; +use reth_primitives_traits::{ + BlockTy, NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, TxTy, +}; +use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; +use reth_scroll_primitives::ScrollReceipt; +use reth_storage_api::errors::any::AnyError; +use revm::{ + context::{BlockEnv, CfgEnv, TxEnv}, + primitives::U256, +}; +use revm_scroll::ScrollSpecId; +use scroll_alloy_evm::{ + ScrollBlockExecutionCtx, ScrollBlockExecutorFactory, ScrollPrecompilesFactory, + ScrollReceiptBuilder, ScrollTransactionIntoTxEnv, +}; +use scroll_alloy_hardforks::ScrollHardforks; + +impl<ChainSpec, N, R, P> ConfigureEvm for ScrollEvmConfig<ChainSpec, N, R, P> +where + ChainSpec: EthChainSpec + ChainConfig<Config = ScrollChainConfig> + ScrollHardforks, + N: NodePrimitives< + Receipt = R::Receipt, + SignedTx = R::Transaction, + BlockHeader = Header, + BlockBody = alloy_consensus::BlockBody<R::Transaction>, + Block = alloy_consensus::Block<R::Transaction>, + >, + ScrollTransactionIntoTxEnv<TxEnv>: + FromRecoveredTx<N::SignedTx> + FromTxWithEncoded<N::SignedTx>, + R: ScrollReceiptBuilder<Receipt = ScrollReceipt, Transaction: SignedTransaction>, + P: ScrollPrecompilesFactory, + Self: Send + Sync + Unpin + Clone + 'static, +{ + type Primitives = N; + type Error = Infallible; + type NextBlockEnvCtx = ScrollNextBlockEnvAttributes; + type BlockExecutorFactory = ScrollBlockExecutorFactory<R, Arc<ChainSpec>, P>; + type BlockAssembler = ScrollBlockAssembler<ChainSpec>; + + fn block_executor_factory(&self) -> &Self::BlockExecutorFactory { + &self.executor_factory + } + + fn block_assembler(&self) -> &Self::BlockAssembler { + &self.block_assembler + } + + fn evm_env(&self, header: &N::BlockHeader) -> Result<EvmEnv<ScrollSpecId>, Self::Error> { + let chain_spec = self.chain_spec(); + let spec_id = self.spec_id_at_timestamp_and_number(header.timestamp(), header.number()); + + let cfg_env = CfgEnv::<ScrollSpecId>::default() + .with_spec(spec_id) + .with_chain_id(chain_spec.chain().id()); + + // get coinbase from chain spec + let coinbase = if let Some(vault_address) = chain_spec.chain_config().fee_vault_address { + vault_address + } else { + header.beneficiary() + }; + + let block_env = BlockEnv { + number: U256::from(header.number()), + beneficiary: coinbase, + timestamp: U256::from(header.timestamp()), + difficulty: header.difficulty(), + prevrandao: header.mix_hash(), + gas_limit: header.gas_limit(), + basefee: header.base_fee_per_gas().unwrap_or_default(), + // EIP-4844 excess blob gas of this block, introduced in Cancun + blob_excess_gas_and_price: None, + }; + + Ok(EvmEnv { cfg_env, block_env }) + } + + fn next_evm_env( + &self, + parent: &N::BlockHeader, + attributes: &Self::NextBlockEnvCtx, + ) -> 
Result<EvmEnv<ScrollSpecId>, Self::Error> { + // ensure we're not missing any timestamp based hardforks + let spec_id = + self.spec_id_at_timestamp_and_number(attributes.timestamp, parent.number() + 1); + + let chain_spec = self.chain_spec(); + + // configure evm env based on parent block + let cfg_env = CfgEnv::<ScrollSpecId>::default() + .with_chain_id(chain_spec.chain().id()) + .with_spec(spec_id); + + // get coinbase from chain spec + let coinbase = if let Some(vault_address) = chain_spec.chain_config().fee_vault_address { + vault_address + } else { + attributes.suggested_fee_recipient + }; + + let block_env = BlockEnv { + number: U256::from(parent.number() + 1), + beneficiary: coinbase, + timestamp: U256::from(attributes.timestamp), + difficulty: U256::ONE, + prevrandao: Some(B256::ZERO), + gas_limit: attributes.gas_limit, + basefee: attributes.base_fee, + blob_excess_gas_and_price: None, + }; + + Ok(EvmEnv { cfg_env, block_env }) + } + + fn context_for_block<'a>( + &self, + block: &'a SealedBlock<BlockTy<Self::Primitives>>, + ) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> { + Ok(ScrollBlockExecutionCtx { parent_hash: block.header().parent_hash() }) + } + + fn context_for_next_block( + &self, + parent: &SealedHeader<N::BlockHeader>, + _attributes: Self::NextBlockEnvCtx, + ) -> Result<ExecutionCtxFor<'_, Self>, Self::Error> { + Ok(ScrollBlockExecutionCtx { parent_hash: parent.hash() }) + } +} + +impl<ChainSpec, N, R, P> ConfigureEngineEvm<ExecutionData> for ScrollEvmConfig<ChainSpec, N, R, P> +where + ChainSpec: EthChainSpec + ChainConfig<Config = ScrollChainConfig> + ScrollHardforks, + N: NodePrimitives< + Receipt = R::Receipt, + SignedTx = R::Transaction, + BlockHeader = Header, + BlockBody = alloy_consensus::BlockBody<R::Transaction>, + Block = alloy_consensus::Block<R::Transaction>, + >, + ScrollTransactionIntoTxEnv<TxEnv>: + FromRecoveredTx<N::SignedTx> + FromTxWithEncoded<N::SignedTx>, + R: ScrollReceiptBuilder<Receipt = ScrollReceipt, Transaction: SignedTransaction>, + P: ScrollPrecompilesFactory, + Self: Send + Sync + Unpin + Clone + 'static, +{ + fn evm_env_for_payload(&self, payload: &ExecutionData) -> Result<EvmEnvFor<Self>, Self::Error> { + let timestamp = payload.payload.timestamp(); + let block_number = payload.payload.block_number(); + let chain_spec = self.chain_spec(); + + let spec_id = self.spec_id_at_timestamp_and_number(timestamp, block_number); + + let cfg_env = CfgEnv::<ScrollSpecId>::default() + .with_chain_id(chain_spec.chain().id()) + .with_spec(spec_id); + + // get coinbase from chain config. 
+ let coinbase = + if let Some(vault_address) = self.chain_spec().chain_config().fee_vault_address { + vault_address + } else { + payload.payload.as_v1().fee_recipient + }; + + let block_env = BlockEnv { + number: U256::from(block_number), + beneficiary: coinbase, + timestamp: U256::from(timestamp), + difficulty: U256::ONE, + prevrandao: Some(B256::ZERO), + gas_limit: payload.payload.as_v1().gas_limit, + basefee: payload.payload.as_v1().base_fee_per_gas.to(), + blob_excess_gas_and_price: None, + }; + + Ok(EvmEnv { cfg_env, block_env }) + } + + fn context_for_payload<'a>( + &self, + payload: &'a ExecutionData, + ) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> { + Ok(ScrollBlockExecutionCtx { parent_hash: payload.parent_hash() }) + } + + fn tx_iterator_for_payload( + &self, + payload: &ExecutionData, + ) -> Result<impl ExecutableTxIterator<Self>, Self::Error> { + Ok(payload.payload.transactions().clone().into_iter().map(|encoded| { + let tx = TxTy::<Self::Primitives>::decode_2718_exact(encoded.as_ref()) + .map_err(AnyError::new)?; + let signer = tx.try_recover().map_err(AnyError::new)?; + Ok::<_, AnyError>(WithEncoded::new(encoded, tx.with_signer(signer))) + })) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ScrollRethReceiptBuilder; + use alloy_consensus::Header; + use reth_chainspec::{Head, NamedChain::Scroll}; + use reth_scroll_chainspec::{ScrollChainConfig, ScrollChainSpecBuilder}; + use reth_scroll_primitives::ScrollPrimitives; + use revm::primitives::B256; + use revm_primitives::Address; + + #[test] + fn test_spec_at_head() { + let config = ScrollEvmConfig::<_, ScrollPrimitives, _>::new( + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()).into(), + ScrollRethReceiptBuilder::default(), + ); + + // prepare all fork heads + let curie_head = &Head { number: 7096836, ..Default::default() }; + let bernoulli_head = &Head { number: 5220340, ..Default::default() }; + let pre_bernoulli_head = &Head { number: 0, ..Default::default() }; + + // check correct spec id + assert_eq!( + config.spec_id_at_timestamp_and_number(curie_head.timestamp, curie_head.number), + ScrollSpecId::CURIE + ); + assert_eq!( + config.spec_id_at_timestamp_and_number(bernoulli_head.timestamp, bernoulli_head.number), + ScrollSpecId::BERNOULLI + ); + assert_eq!( + config.spec_id_at_timestamp_and_number( + pre_bernoulli_head.timestamp, + pre_bernoulli_head.number + ), + ScrollSpecId::SHANGHAI + ); + } + + #[test] + fn test_fill_cfg_env() -> eyre::Result<()> { + let config = ScrollEvmConfig::<_, ScrollPrimitives, _>::new( + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()).into(), + ScrollRethReceiptBuilder::default(), + ); + + // curie + let curie_header = Header { number: 7096836, ..Default::default() }; + + // fill cfg env + let env = config.evm_env(&curie_header)?; + + // check correct cfg env + assert_eq!(env.cfg_env.chain_id, Scroll as u64); + assert_eq!(env.cfg_env.spec, ScrollSpecId::CURIE); + + // bernoulli + let bernoulli_header = Header { number: 5220340, ..Default::default() }; + + // fill cfg env + let env = config.evm_env(&bernoulli_header)?; + + // check correct cfg env + assert_eq!(env.cfg_env.chain_id, Scroll as u64); + assert_eq!(env.cfg_env.spec, ScrollSpecId::BERNOULLI); + + // pre-bernoulli + let pre_bernoulli_header = Header { number: 0, ..Default::default() }; + + // fill cfg env + let env = config.evm_env(&pre_bernoulli_header)?; + + // check correct cfg env + assert_eq!(env.cfg_env.chain_id, Scroll as u64); + 
assert_eq!(env.cfg_env.spec, ScrollSpecId::SHANGHAI); + + Ok(()) + } + + #[test] + fn test_fill_block_env() { + let config = ScrollEvmConfig::<_, ScrollPrimitives, _>::new( + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()).into(), + ScrollRethReceiptBuilder::default(), + ); + + // curie header + let header = Header { + number: 7096836, + beneficiary: Address::random(), + timestamp: 1719994277, + mix_hash: B256::random(), + base_fee_per_gas: Some(155157341), + gas_limit: 10000000, + ..Default::default() + }; + + // fill block env + let env = config.evm_env(&header).unwrap(); + + // verify block env correctly updated + let expected = BlockEnv { + number: U256::from(header.number), + beneficiary: config.chain_spec().config.fee_vault_address.unwrap(), + timestamp: U256::from(header.timestamp), + prevrandao: Some(header.mix_hash), + difficulty: U256::ZERO, + basefee: header.base_fee_per_gas.unwrap_or_default(), + gas_limit: header.gas_limit, + blob_excess_gas_and_price: None, + }; + assert_eq!(env.block_env, expected) + } + + #[test] + fn test_next_cfg_and_block_env() -> eyre::Result<()> { + let config = ScrollEvmConfig::<_, ScrollPrimitives, _>::new( + ScrollChainSpecBuilder::scroll_mainnet().build(ScrollChainConfig::mainnet()).into(), + ScrollRethReceiptBuilder::default(), + ); + + // pre curie header + let header = Header { + number: 7096835, + beneficiary: Address::random(), + timestamp: 1719994274, + mix_hash: B256::random(), + base_fee_per_gas: None, + gas_limit: 10000000, + ..Default::default() + }; + + // curie block attributes + let attributes = ScrollNextBlockEnvAttributes { + timestamp: 1719994277, + suggested_fee_recipient: Address::random(), + gas_limit: 10000000, + base_fee: 155157341, + }; + + // get next cfg env and block env + let env = config.next_evm_env(&header, &attributes)?; + let (cfg_env, block_env, spec) = (env.cfg_env.clone(), env.block_env, env.cfg_env.spec); + + // verify cfg env + assert_eq!(cfg_env.chain_id, Scroll as u64); + assert_eq!(spec, ScrollSpecId::CURIE); + + // verify block env + let expected = BlockEnv { + number: U256::from(header.number + 1), + beneficiary: config.chain_spec().config.fee_vault_address.unwrap(), + timestamp: U256::from(attributes.timestamp), + prevrandao: Some(B256::ZERO), + difficulty: U256::ONE, + basefee: 155157341, + gas_limit: header.gas_limit, + blob_excess_gas_and_price: None, + }; + assert_eq!(block_env, expected); + + Ok(()) + } +}
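All three env constructors above (`evm_env`, `next_evm_env`, `evm_env_for_payload`) route the beneficiary through the chain config: a configured fee vault overrides the header beneficiary or suggested fee recipient, which is why the block-env tests expect `fee_vault_address.unwrap()` on mainnet. A sketch of that selection, with `Addr` standing in for `alloy_primitives::Address`:

```rust
/// A configured fee vault takes precedence over the per-block fee recipient.
fn coinbase<Addr>(fee_vault_address: Option<Addr>, fallback: Addr) -> Addr {
    fee_vault_address.unwrap_or(fallback)
}

fn main() {
    assert_eq!(coinbase(Some("fee-vault"), "suggested-recipient"), "fee-vault");
    assert_eq!(coinbase(None, "suggested-recipient"), "suggested-recipient");
}
```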
diff --git reth/crates/scroll/evm/src/execute.rs scroll-reth/crates/scroll/evm/src/execute.rs new file mode 100644 index 0000000000000000000000000000000000000000..49fe874c64b4f4314ec5ceccd4bcdd922cef26f4 --- /dev/null +++ scroll-reth/crates/scroll/evm/src/execute.rs @@ -0,0 +1,726 @@ +//! Execution primitives for EVM. + +use crate::ScrollEvmConfig; +use core::fmt::Debug; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, B256}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::Block; + +/// Input for block execution. +#[derive(Debug, Clone, Copy)] +pub struct ScrollBlockExecutionInput { + /// Block number. + pub number: u64, + /// Block timestamp. + pub timestamp: u64, + /// Parent block hash. + pub parent_hash: B256, + /// Block gas limit. + pub gas_limit: u64, + /// Block beneficiary. + pub beneficiary: Address, +} + +impl<B: Block> From<&SealedBlock<B>> for ScrollBlockExecutionInput { + fn from(block: &SealedBlock<B>) -> Self { + Self { + number: block.header().number(), + timestamp: block.header().timestamp(), + parent_hash: block.header().parent_hash(), + gas_limit: block.header().gas_limit(), + beneficiary: block.header().beneficiary(), + } + } +} + +/// Helper type with backwards compatible methods to obtain Scroll executor +/// providers. +pub type ScrollExecutorProvider = ScrollEvmConfig; + +#[cfg(test)] +mod tests { + use crate::{ScrollEvmConfig, ScrollRethReceiptBuilder}; + use std::{convert::Infallible, sync::Arc}; + + use alloy_consensus::{ + transaction::{Recovered, SignerRecoverable}, + Block, BlockBody, Header, SignableTransaction, Signed, Transaction, TxLegacy, + }; + use alloy_eips::{ + eip7702::{constants::PER_EMPTY_ACCOUNT_COST, Authorization, SignedAuthorization}, + Encodable2718, Typed2718, + }; + use alloy_evm::{ + block::{BlockExecutionResult, BlockExecutor}, + precompiles::PrecompilesMap, + Evm, + }; + use alloy_primitives::Sealed; + use reth_chainspec::MIN_TRANSACTION_GAS; + use reth_evm::ConfigureEvm; + use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SignedTransaction}; + use reth_scroll_chainspec::{ScrollChainConfig, ScrollChainSpec, ScrollChainSpecBuilder}; + use reth_scroll_primitives::{ + ScrollBlock, ScrollPrimitives, ScrollReceipt, ScrollTransactionSigned, + }; + use revm::{ + bytecode::Bytecode, + database::{ + states::{bundle_state::BundleRetention, StorageSlot}, + EmptyDBTyped, State, + }, + inspector::NoOpInspector, + primitives::{Address, TxKind, B256, U256}, + state::AccountInfo, + }; + use scroll_alloy_consensus::{ScrollTransactionReceipt, ScrollTxEnvelope, ScrollTxType}; + use scroll_alloy_evm::{ + compute_compressed_size, compute_compression_ratio, + curie::{CURIE_L1_GAS_PRICE_ORACLE_BYTECODE, CURIE_L1_GAS_PRICE_ORACLE_STORAGE}, + gas_price_oracle::*, + ScrollBlockExecutionCtx, ScrollBlockExecutor, ScrollEvm, ScrollTxCompressionInfos, + }; + use scroll_alloy_hardforks::{ForkCondition, ScrollHardfork, ScrollHardforks}; + + const BLOCK_GAS_LIMIT: u64 = 10_000_000; + const SCROLL_CHAIN_ID: u64 = 534352; + const NOT_CURIE_BLOCK_NUMBER: u64 = 7096835; + const CURIE_BLOCK_NUMBER: u64 = 7096837; + const EUCLID_V2_BLOCK_NUMBER: u64 = 14907015; + const EUCLID_V2_BLOCK_TIMESTAMP: u64 = 1745305200; + const FEYNMAN_BLOCK_TIMESTAMP: u64 = 1755576000; + const GALILEO_BLOCK_TIMESTAMP: u64 = 1765868400; + + fn state() -> State<EmptyDBTyped<Infallible>> { + let db = EmptyDBTyped::<Infallible>::new(); + State::builder().with_database(db).with_bundle_update().without_state_clear().build() + } + + 
#[allow(clippy::type_complexity)] + fn executor<'a>( + block: &RecoveredBlock<ScrollBlock>, + state: &'a mut State<EmptyDBTyped<Infallible>>, + ) -> ScrollBlockExecutor< + ScrollEvm<&'a mut State<EmptyDBTyped<Infallible>>, NoOpInspector, PrecompilesMap>, + ScrollRethReceiptBuilder, + Arc<ScrollChainSpec>, + > { + // build chain spec based on mainnet config, with some fork overrides + let spec_builder = ScrollChainSpecBuilder::scroll_mainnet() + .with_fork(ScrollHardfork::Galileo, ForkCondition::Timestamp(GALILEO_BLOCK_TIMESTAMP)); + let chain_spec = Arc::new(spec_builder.build(ScrollChainConfig::mainnet())); + let evm_config = ScrollEvmConfig::scroll(chain_spec.clone()); + + let evm = + evm_config.evm_for_block(state, block.header()).expect("failed to get evm for block"); + let receipt_builder = ScrollRethReceiptBuilder::default(); + ScrollBlockExecutor::new( + evm, + ScrollBlockExecutionCtx { parent_hash: block.parent_hash }, + chain_spec, + receipt_builder, + ) + } + + fn block( + number: u64, + timestamp: u64, + transactions: Vec<ScrollTransactionSigned>, + ) -> RecoveredBlock<<ScrollPrimitives as NodePrimitives>::Block> { + let senders = transactions.iter().map(|t| t.recover_signer().unwrap()).collect(); + RecoveredBlock::new_unhashed( + Block { + header: Header { + number, + timestamp, + gas_limit: BLOCK_GAS_LIMIT, + ..Default::default() + }, + body: BlockBody { transactions, ..Default::default() }, + }, + senders, + ) + } + + fn transaction(ty: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { + let pk = B256::random(); + match ty { + ScrollTxType::Legacy => { + let tx = TxLegacy { + to: TxKind::Call(Address::ZERO), + chain_id: Some(SCROLL_CHAIN_ID), + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Legacy(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::Eip2930 => { + let tx = alloy_consensus::TxEip2930 { + to: TxKind::Call(Address::ZERO), + chain_id: SCROLL_CHAIN_ID, + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip2930(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::Eip1559 => { + let tx = alloy_consensus::TxEip1559 { + to: TxKind::Call(Address::ZERO), + chain_id: SCROLL_CHAIN_ID, + gas_limit, + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip1559(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::Eip7702 => { + let authorization = Authorization { + chain_id: Default::default(), + address: Address::random(), + nonce: 0, + }; + let signature = + reth_primitives::sign_message(B256::random(), authorization.signature_hash()) + .unwrap(); + + let tx = alloy_consensus::TxEip7702 { + to: Address::ZERO, + chain_id: SCROLL_CHAIN_ID, + gas_limit: gas_limit + PER_EMPTY_ACCOUNT_COST, + authorization_list: vec![SignedAuthorization::new_unchecked( + authorization, + signature.v() as u8, + signature.r(), + signature.s(), + )], + ..Default::default() + }; + let signature = reth_primitives::sign_message(pk, tx.signature_hash()).unwrap(); + ScrollTxEnvelope::Eip7702(Signed::new_unhashed(tx, signature)) + } + ScrollTxType::L1Message => { + ScrollTxEnvelope::L1Message(Sealed::new(scroll_alloy_consensus::TxL1Message { + sender: Address::random(), + to: Address::ZERO, + gas_limit, + ..Default::default() + })) + } + } + } + + fn execute_block( + transactions: Vec<ScrollTxEnvelope>, + block_number: u64, + 
block_timestamp: u64, + compression_infos: Option<ScrollTxCompressionInfos>, + ) -> eyre::Result<BlockExecutionResult<ScrollReceipt>> { + let block = block(block_number, block_timestamp, transactions); + + let mut state = state(); + let mut strategy = executor(&block, &mut state); + + // determine l1 gas oracle storage + let l1_gas_oracle_storage = + if strategy.spec().is_galileo_active_at_timestamp(block_timestamp) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(1_000_000_000u64)), + (GPO_PENALTY_FACTOR_SLOT, U256::from(5u64)), // apply high penalty + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + (GPO_IS_GALILEO_SLOT, U256::from(0)), // only activated in `GalileoV2` + ] + } else if strategy.spec().is_feynman_active_at_timestamp(block_timestamp) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(1_000_000_000u64)), + (GPO_PENALTY_FACTOR_SLOT, U256::from(1_000_000_000u64)), + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + ] + } else if strategy.spec().is_curie_active_at_block(block_number) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + ] + } else { + vec![ + (GPO_L1_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + ] + } + .into_iter() + .collect(); + + // load accounts in state + strategy.evm_mut().db_mut().insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + Default::default(), + l1_gas_oracle_storage, + ); + for add in block.senders() { + strategy + .evm_mut() + .db_mut() + .insert_account(*add, AccountInfo { balance: U256::MAX, ..Default::default() }); + } + + if let Some(compression_infos) = compression_infos { + Ok(strategy.execute_block_with_compression_cache( + block.transactions_recovered(), + compression_infos, + )?) + } else { + Ok(strategy.execute_block(block.transactions_recovered())?) 
+ } + } + + fn execute_transaction( + tx_type: ScrollTxType, + block_number: u64, + block_timestamp: u64, + expected_l1_fee: U256, + expected_error: Option<&str>, + ) -> eyre::Result<()> { + // prepare transaction + let transaction = transaction(tx_type, MIN_TRANSACTION_GAS); + let block = block(block_number, block_timestamp, vec![transaction.clone()]); + + // init strategy + let mut state = state(); + let mut strategy = executor(&block, &mut state); + + // determine l1 gas oracle storage + let l1_gas_oracle_storage = + if strategy.spec().is_galileo_active_at_timestamp(block_timestamp) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(2_000_000_000u64)), // penalty if <2x + (GPO_PENALTY_FACTOR_SLOT, U256::from(5u64)), /* apply high + * penalty */ + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + (GPO_IS_GALILEO_SLOT, U256::from(0)), // only activated in `GalileoV2` + ] + } else if strategy.spec().is_feynman_active_at_timestamp(block_timestamp) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + (GPO_PENALTY_THRESHOLD_SLOT, U256::from(2_000_000_000u64)), // penalty if <2x + (GPO_PENALTY_FACTOR_SLOT, U256::from(10_000_000_000u64)), // 10x penalty + (GPO_IS_FEYNMAN_SLOT, U256::from(1)), + ] + } else if strategy.spec().is_curie_active_at_block(block_number) { + vec![ + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + (GPO_L1_BLOB_BASE_FEE_SLOT, U256::from(10000)), + (GPO_COMMIT_SCALAR_SLOT, U256::from(1000)), + (GPO_BLOB_SCALAR_SLOT, U256::from(10000)), + (GPO_IS_CURIE_SLOT, U256::from(1)), + ] + } else { + vec![ + (GPO_L1_BASE_FEE_SLOT, U256::from(1000)), + (GPO_OVERHEAD_SLOT, U256::from(1000)), + (GPO_SCALAR_SLOT, U256::from(1000)), + ] + } + .into_iter() + .collect(); + + // load accounts in state + strategy.evm_mut().db_mut().insert_account_with_storage( + L1_GAS_PRICE_ORACLE_ADDRESS, + Default::default(), + l1_gas_oracle_storage, + ); + for add in block.senders() { + strategy + .evm_mut() + .db_mut() + .insert_account(*add, AccountInfo { balance: U256::MAX, ..Default::default() }); + } + + // execute and verify output + let sender = transaction.try_recover()?; + let tx = Recovered::new_unchecked(transaction, sender); + let res = strategy.execute_transaction(&tx); + + // check for error or execution outcome + let output = strategy.apply_post_execution_changes()?; + if let Some(error) = expected_error { + assert!(res.unwrap_err().to_string().contains(error)); + } else { + let BlockExecutionResult { receipts, .. 
} = output; + let gas_used = + MIN_TRANSACTION_GAS + if tx_type.is_eip7702() { PER_EMPTY_ACCOUNT_COST } else { 0 }; + let inner = alloy_consensus::Receipt { + cumulative_gas_used: gas_used, + status: true.into(), + ..Default::default() + }; + let into_scroll_receipt = |inner: alloy_consensus::Receipt| { + ScrollTransactionReceipt::new(inner, expected_l1_fee) + }; + let receipt = match tx_type { + ScrollTxType::Legacy => ScrollReceipt::Legacy(into_scroll_receipt(inner)), + ScrollTxType::Eip2930 => ScrollReceipt::Eip2930(into_scroll_receipt(inner)), + ScrollTxType::Eip1559 => ScrollReceipt::Eip1559(into_scroll_receipt(inner)), + ScrollTxType::Eip7702 => ScrollReceipt::Eip7702(into_scroll_receipt(inner)), + ScrollTxType::L1Message => ScrollReceipt::L1Message(inner), + }; + let expected = vec![receipt]; + + assert_eq!(receipts, expected); + } + + Ok(()) + } + + #[test] + fn test_apply_pre_execution_changes_curie_block() -> eyre::Result<()> { + // init curie transition block + let curie_block = block(CURIE_BLOCK_NUMBER - 1, 0, vec![]); + + // init strategy + let mut state = state(); + let mut strategy = executor(&curie_block, &mut state); + + // apply pre execution change + strategy.apply_pre_execution_changes()?; + + // take bundle + let state = strategy.evm_mut().db_mut(); + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // assert oracle contract contains updated bytecode + let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS).unwrap().clone(); + let oracle_bytecode = oracle.info.unwrap().code.unwrap(); + let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); + + // Note: Eq operator fails due to the presence of `table_ptr` in the `JumpTable` struct + // therefore we do a manual comparison. 
+ assert_eq!( + bytecode.legacy_jump_table().unwrap().len(), + oracle_bytecode.legacy_jump_table().unwrap().len() + ); + assert_eq!( + bytecode.legacy_jump_table().unwrap().as_slice(), + oracle_bytecode.legacy_jump_table().unwrap().as_slice() + ); + assert_eq!(bytecode.bytecode(), oracle_bytecode.bytecode()); + + // check oracle contract contains storage changeset + let mut storage = oracle.storage.into_iter().collect::<Vec<(U256, StorageSlot)>>(); + storage.sort_by(|(a, _), (b, _)| a.cmp(b)); + for (got, expected) in storage.into_iter().zip(CURIE_L1_GAS_PRICE_ORACLE_STORAGE) { + assert_eq!(got.0, expected.0); + assert_eq!(got.1, StorageSlot { present_value: expected.1, ..Default::default() }); + } + + Ok(()) + } + + #[test] + fn test_apply_pre_execution_changes_not_curie_block() -> eyre::Result<()> { + // init block + let not_curie_block = block(NOT_CURIE_BLOCK_NUMBER, 0, vec![]); + + // init strategy + let mut state = state(); + let mut strategy = executor(&not_curie_block, &mut state); + + // apply pre execution change + strategy.apply_pre_execution_changes()?; + + // take bundle + let state = strategy.evm_mut().db_mut(); + state.merge_transitions(BundleRetention::Reverts); + let bundle = state.take_bundle(); + + // assert oracle contract is empty + let oracle = bundle.state.get(&L1_GAS_PRICE_ORACLE_ADDRESS); + assert!(oracle.is_none()); + + Ok(()) + } + + #[test] + fn test_execute_transactions_exceeds_block_gas_limit() -> eyre::Result<()> { + // prepare transaction exceeding block gas limit + let transaction = transaction(ScrollTxType::Legacy, BLOCK_GAS_LIMIT + 1); + let block = block(7096837, 0, vec![transaction.clone()]); + + // init strategy + let mut state = state(); + let mut strategy = executor(&block, &mut state); + + // execute and verify error + let sender = transaction.try_recover()?; + let tx = Recovered::new_unchecked(transaction, sender); + let res = strategy.execute_transaction(&tx); + assert_eq!( + res.unwrap_err().to_string(), + "transaction gas limit 10000001 is more than blocks available gas 10000000" + ); + + Ok(()) + } + + #[test] + fn test_execute_transactions_l1_message() -> eyre::Result<()> { + // Execute l1 message on curie block + let expected_l1_fee = U256::ZERO; + execute_transaction(ScrollTxType::L1Message, CURIE_BLOCK_NUMBER, 0, expected_l1_fee, None)?; + Ok(()) + } + + #[test] + fn test_execute_transaction_l1_message_feynman_fork() -> eyre::Result<()> { + // Execute L1 message on feynman block + let expected_l1_fee = U256::ZERO; + execute_transaction( + ScrollTxType::L1Message, + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transaction_l1_message_galileo_fork() -> eyre::Result<()> { + // Execute L1 message on galileo block + let expected_l1_fee = U256::ZERO; + execute_transaction( + ScrollTxType::L1Message, + CURIE_BLOCK_NUMBER + 1, + GALILEO_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_legacy_curie_fork() -> eyre::Result<()> { + // Execute legacy transaction on curie block + let expected_l1_fee = U256::from(10); + execute_transaction(ScrollTxType::Legacy, CURIE_BLOCK_NUMBER, 0, expected_l1_fee, None)?; + Ok(()) + } + + #[test] + fn test_execute_transactions_legacy_not_curie_fork() -> eyre::Result<()> { + // Execute legacy before curie block + let expected_l1_fee = U256::from(2); + execute_transaction( + ScrollTxType::Legacy, + NOT_CURIE_BLOCK_NUMBER, + 0, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + 
fn test_execute_transactions_legacy_feynman_fork() -> eyre::Result<()> { + // Execute legacy transaction on feynman block + let expected_l1_fee = U256::from(10); + execute_transaction( + ScrollTxType::Legacy, + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_legacy_galileo_fork() -> eyre::Result<()> { + // Execute legacy transaction on galileo block + let expected_l1_fee = U256::from(182); + execute_transaction( + ScrollTxType::Legacy, + CURIE_BLOCK_NUMBER + 1, + GALILEO_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip2930_curie_fork() -> eyre::Result<()> { + // Execute eip2930 transaction on curie block + let expected_l1_fee = U256::from(10); + execute_transaction(ScrollTxType::Eip2930, CURIE_BLOCK_NUMBER, 0, expected_l1_fee, None)?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip2930_not_curie_fork() -> eyre::Result<()> { + // Execute eip2930 transaction before curie block + execute_transaction( + ScrollTxType::Eip2930, + NOT_CURIE_BLOCK_NUMBER, + 0, + U256::ZERO, + Some("Eip2930 is not supported"), + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip2930_feynman_fork() -> eyre::Result<()> { + // Execute eip2930 transaction on feynman block + let expected_l1_fee = U256::from(10); + execute_transaction( + ScrollTxType::Eip2930, + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip1559_curie_fork() -> eyre::Result<()> { + // Execute eip1559 transaction on curie block + let expected_l1_fee = U256::from(10); + execute_transaction(ScrollTxType::Eip1559, CURIE_BLOCK_NUMBER, 0, expected_l1_fee, None)?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip1559_not_curie_fork() -> eyre::Result<()> { + // Execute eip1559 transaction before curie block + execute_transaction( + ScrollTxType::Eip1559, + NOT_CURIE_BLOCK_NUMBER, + 0, + U256::ZERO, + Some("Eip1559 is not supported"), + )?; + Ok(()) + } + + #[test] + fn test_execute_transaction_eip1559_feynman_fork() -> eyre::Result<()> { + // Execute eip1559 transaction on feynman block + let expected_l1_fee = U256::from(10); + execute_transaction( + ScrollTxType::Eip1559, + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip7702_euclid_v2_fork() -> eyre::Result<()> { + // Execute eip7702 transaction on euclid v2 block. 
+ let expected_l1_fee = U256::from(19); + execute_transaction( + ScrollTxType::Eip7702, + EUCLID_V2_BLOCK_NUMBER, + EUCLID_V2_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip7702_not_euclid_v2_fork() -> eyre::Result<()> { + // Execute eip7702 transaction before euclid v2 block + execute_transaction( + ScrollTxType::Eip7702, + EUCLID_V2_BLOCK_NUMBER - 1, + EUCLID_V2_BLOCK_TIMESTAMP - 1, + U256::ZERO, + Some("Eip7702 is not supported"), + )?; + Ok(()) + } + + #[test] + fn test_execute_transactions_eip7702_feynman_fork() -> eyre::Result<()> { + // Execute eip7702 transaction on feynman block + let expected_l1_fee = U256::from(19); + execute_transaction( + ScrollTxType::Eip7702, + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + expected_l1_fee, + None, + )?; + Ok(()) + } + + #[test] + fn test_consistency_with_provided_compression_ratio() -> eyre::Result<()> { + let transactions = vec![ + transaction(ScrollTxType::Legacy, MIN_TRANSACTION_GAS), + transaction(ScrollTxType::Eip2930, MIN_TRANSACTION_GAS), + transaction(ScrollTxType::Eip1559, MIN_TRANSACTION_GAS), + transaction(ScrollTxType::Eip7702, MIN_TRANSACTION_GAS), + ]; + let compression_infos = transactions + .iter() + .map(|tx| { + (compute_compression_ratio(tx.input()), compute_compressed_size(&tx.encoded_2718())) + }) + .collect::<Vec<_>>(); + let with_compression_infos = execute_block( + transactions.clone(), + CURIE_BLOCK_NUMBER + 1, + FEYNMAN_BLOCK_TIMESTAMP, + Some(compression_infos), + )?; + let without_compression_infos = + execute_block(transactions, CURIE_BLOCK_NUMBER + 1, FEYNMAN_BLOCK_TIMESTAMP, None)?; + assert_eq!(without_compression_infos, with_compression_infos); + Ok(()) + } +}
diff --git reth/crates/scroll/evm/src/l1.rs scroll-reth/crates/scroll/evm/src/l1.rs new file mode 100644 index 0000000000000000000000000000000000000000..1e2934100f3e82ca7623bbbfd1bbe5b34a4f6385 --- /dev/null +++ scroll-reth/crates/scroll/evm/src/l1.rs @@ -0,0 +1,53 @@ +use super::spec_id_at_timestamp_and_number; +use reth_evm::block::BlockExecutionError; +use revm_primitives::U256; +use revm_scroll::l1block::L1BlockInfo; +use scroll_alloy_evm::ScrollTxCompressionInfo; +use scroll_alloy_hardforks::ScrollHardforks; + +/// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction +/// based off of the chain spec's activated hardfork. +pub trait RethL1BlockInfo { + /// Forwards an L1 transaction calculation to revm and returns the gas cost. + /// + /// ### Takes + /// - `chain_spec`: The chain spec for the node. + /// - `timestamp`: The timestamp of the current block. + /// - `block`: The block number of the current block. + /// - `input`: The calldata of the transaction. + /// - `compression_info`: An optional (compression ratio, compressed size) pair. + /// - `is_l1_message`: Whether or not the transaction is a l1 message. + fn l1_tx_data_fee( + &mut self, + chain_spec: impl ScrollHardforks, + timestamp: u64, + block: u64, + input: &[u8], + compression_info: Option<ScrollTxCompressionInfo>, + is_l1_message: bool, + ) -> Result<U256, BlockExecutionError>; +} + +impl RethL1BlockInfo for L1BlockInfo { + fn l1_tx_data_fee( + &mut self, + chain_spec: impl ScrollHardforks, + timestamp: u64, + block_number: u64, + input: &[u8], + compression_info: Option<ScrollTxCompressionInfo>, + is_l1_message: bool, + ) -> Result<U256, BlockExecutionError> { + if is_l1_message { + return Ok(U256::ZERO); + } + + let (compression_ratio, compressed_size) = match compression_info { + Some((ratio, size)) => (Some(ratio), Some(size)), + None => (None, None), + }; + + let spec_id = spec_id_at_timestamp_and_number(timestamp, block_number, chain_spec); + Ok(self.calculate_tx_l1_cost(input, spec_id, compression_ratio, compressed_size)) + } +}
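The only branching `RethL1BlockInfo` adds on top of revm's cost calculation is the L1-message short-circuit. A hedged sketch of that shape, with `Fee` standing in for `U256` and the closure standing in for the forwarded `calculate_tx_l1_cost` call:

```rust
/// L1 messages are paid for on L1 and incur no L2-side L1 data fee; everything
/// else falls through to the cost calculation at the active spec.
fn l1_data_fee<Fee: Default>(is_l1_message: bool, calculate: impl FnOnce() -> Fee) -> Fee {
    if is_l1_message {
        Fee::default() // zero
    } else {
        calculate()
    }
}

fn main() {
    assert_eq!(l1_data_fee(true, || 42u64), 0);
    assert_eq!(l1_data_fee(false, || 42u64), 42);
}
```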
diff --git reth/crates/scroll/evm/src/lib.rs scroll-reth/crates/scroll/evm/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..745b31c9016d5e5482af966a0b6a2e68234356ef --- /dev/null +++ scroll-reth/crates/scroll/evm/src/lib.rs @@ -0,0 +1,171 @@ +//! Scroll evm execution implementation. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +mod build; + +mod config; + +mod execute; +pub use execute::{ScrollBlockExecutionInput, ScrollExecutorProvider}; + +mod l1; +pub use l1::RethL1BlockInfo; + +mod base_fee; +pub use base_fee::{ + ScrollBaseFeeProvider, DEFAULT_BASE_FEE_OVERHEAD, DEFAULT_BASE_FEE_SCALAR, + L1_BASE_FEE_PRECISION, MAX_L2_BASE_FEE, +}; + +mod receipt; +pub use receipt::ScrollRethReceiptBuilder; + +mod withdraw_root; +pub use withdraw_root::LoadWithdrawRoot; + +use crate::build::ScrollBlockAssembler; +use alloc::sync::Arc; + +use alloy_primitives::{Address, BlockNumber, BlockTimestamp}; +use reth_primitives_traits::NodePrimitives; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_primitives::ScrollPrimitives; +use revm_scroll::ScrollSpecId; +pub use scroll_alloy_evm::{ + compute_compressed_size, compute_compression_ratio, ScrollBlockExecutorFactory, + ScrollDefaultPrecompilesFactory, ScrollEvmFactory, ScrollTxCompressionInfos, +}; +pub use scroll_alloy_hardforks::{ScrollHardfork, ScrollHardforks}; + +/// Scroll EVM configuration. +#[derive(Debug)] +pub struct ScrollEvmConfig< + ChainSpec = ScrollChainSpec, + N: NodePrimitives = ScrollPrimitives, + R = ScrollRethReceiptBuilder, + P = ScrollDefaultPrecompilesFactory, +> { + /// Executor factory. + executor_factory: ScrollBlockExecutorFactory<R, Arc<ChainSpec>, P>, + /// Block assembler. + block_assembler: ScrollBlockAssembler<ChainSpec>, + /// Node primitives marker. + _pd: core::marker::PhantomData<N>, +} + +impl<ChainSpec: ScrollHardforks> ScrollEvmConfig<ChainSpec> { + /// Creates a new [`ScrollEvmConfig`] with the given chain spec for Scroll chains. + pub fn scroll(chain_spec: Arc<ChainSpec>) -> Self { + Self::new(chain_spec, ScrollRethReceiptBuilder::default()) + } +} + +impl<ChainSpec, N: NodePrimitives, R: Clone, P: Clone> Clone + for ScrollEvmConfig<ChainSpec, N, R, P> +{ + fn clone(&self) -> Self { + Self { + executor_factory: self.executor_factory.clone(), + block_assembler: self.block_assembler.clone(), + _pd: self._pd, + } + } +} + +impl<ChainSpec: ScrollHardforks, N: NodePrimitives, R, P: Default> + ScrollEvmConfig<ChainSpec, N, R, P> +{ + /// Creates a new [`ScrollEvmConfig`] with the given chain spec. + pub fn new(chain_spec: Arc<ChainSpec>, receipt_builder: R) -> Self { + Self { + block_assembler: ScrollBlockAssembler::new(chain_spec.clone()), + executor_factory: ScrollBlockExecutorFactory::new( + receipt_builder, + chain_spec, + ScrollEvmFactory::default(), + ), + _pd: core::marker::PhantomData, + } + } + + /// Returns the chain spec associated with this configuration. + pub const fn chain_spec(&self) -> &Arc<ChainSpec> { + self.executor_factory.spec() + } + + /// Returns the spec id at the given head. + pub fn spec_id_at_timestamp_and_number( + &self, + timestamp: BlockTimestamp, + number: BlockNumber, + ) -> ScrollSpecId { + let chain_spec = self.chain_spec(); + spec_id_at_timestamp_and_number(timestamp, number, chain_spec) + } +} + +/// Returns the spec id at the given timestamp and block number for the provided chain spec. 
+pub fn spec_id_at_timestamp_and_number( + timestamp: u64, + number: u64, + chain_spec: impl ScrollHardforks, +) -> ScrollSpecId { + if chain_spec + .scroll_fork_activation(ScrollHardfork::GalileoV2) + .active_at_timestamp_or_number(timestamp, number) || + chain_spec + .scroll_fork_activation(ScrollHardfork::Galileo) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::GALILEO + } else if chain_spec + .scroll_fork_activation(ScrollHardfork::Feynman) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::FEYNMAN + } else if chain_spec + .scroll_fork_activation(ScrollHardfork::EuclidV2) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::EUCLID + } else if chain_spec + .scroll_fork_activation(ScrollHardfork::Euclid) + .active_at_timestamp_or_number(timestamp, number) || + chain_spec + .scroll_fork_activation(ScrollHardfork::DarwinV2) + .active_at_timestamp_or_number(timestamp, number) || + chain_spec + .scroll_fork_activation(ScrollHardfork::Darwin) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::DARWIN + } else if chain_spec + .scroll_fork_activation(ScrollHardfork::Curie) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::CURIE + } else if chain_spec + .scroll_fork_activation(ScrollHardfork::Bernoulli) + .active_at_timestamp_or_number(timestamp, number) + { + ScrollSpecId::BERNOULLI + } else { + ScrollSpecId::SHANGHAI + } +} + +/// The attributes for the next block env. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ScrollNextBlockEnvAttributes { + /// The timestamp of the next block. + pub timestamp: u64, + /// The suggested fee recipient for the next block. + pub suggested_fee_recipient: Address, + /// Block gas limit. + pub gas_limit: u64, + /// The base fee of the next block. + pub base_fee: u64, +}
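`spec_id_at_timestamp_and_number` resolves the newest active hardfork to a `ScrollSpecId`, and several hardforks deliberately collapse onto the same spec (Galileo/GalileoV2 → `GALILEO`, Euclid/DarwinV2/Darwin → `DARWIN`). A standalone sketch of that precedence with fork activations pre-resolved to booleans:

```rust
#[derive(Debug, PartialEq)]
enum Spec { Shanghai, Bernoulli, Curie, Darwin, Euclid, Feynman, Galileo }

struct Active {
    galileo: bool,   // Galileo or GalileoV2
    feynman: bool,
    euclid_v2: bool,
    darwin: bool,    // Euclid, DarwinV2 or Darwin
    curie: bool,
    bernoulli: bool,
}

/// Newest active fork wins; anything older than Bernoulli resolves to Shanghai.
fn spec(a: &Active) -> Spec {
    if a.galileo { Spec::Galileo }
    else if a.feynman { Spec::Feynman }
    else if a.euclid_v2 { Spec::Euclid }
    else if a.darwin { Spec::Darwin }
    else if a.curie { Spec::Curie }
    else if a.bernoulli { Spec::Bernoulli }
    else { Spec::Shanghai }
}

fn main() {
    // Mirrors the config tests: a genesis-era block resolves to SHANGHAI.
    let genesis = Active {
        galileo: false, feynman: false, euclid_v2: false,
        darwin: false, curie: false, bernoulli: false,
    };
    assert_eq!(spec(&genesis), Spec::Shanghai);
}
```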
diff --git reth/crates/scroll/evm/src/receipt.rs scroll-reth/crates/scroll/evm/src/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..062ebc99e096d5c7df330f1ee1040769febf37f5 --- /dev/null +++ scroll-reth/crates/scroll/evm/src/receipt.rs @@ -0,0 +1,37 @@ +use alloy_consensus::{Eip658Value, Receipt}; +use alloy_evm::Evm; +use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; +use scroll_alloy_consensus::{ScrollTransactionReceipt, ScrollTxType}; +use scroll_alloy_evm::{ReceiptBuilderCtx, ScrollReceiptBuilder}; + +/// Basic builder for receipts of [`ScrollTransactionSigned`]. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct ScrollRethReceiptBuilder; + +impl ScrollReceiptBuilder for ScrollRethReceiptBuilder { + type Transaction = ScrollTransactionSigned; + type Receipt = ScrollReceipt; + + fn build_receipt<E: Evm>( + &self, + ctx: ReceiptBuilderCtx<'_, ScrollTransactionSigned, E>, + ) -> Self::Receipt { + let inner = Receipt { + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + status: Eip658Value::Eip658(ctx.result.is_success()), + cumulative_gas_used: ctx.cumulative_gas_used, + logs: ctx.result.into_logs(), + }; + let into_scroll_receipt = |inner: Receipt| ScrollTransactionReceipt::new(inner, ctx.l1_fee); + + match ctx.tx.tx_type() { + ScrollTxType::Legacy => ScrollReceipt::Legacy(into_scroll_receipt(inner)), + ScrollTxType::Eip2930 => ScrollReceipt::Eip2930(into_scroll_receipt(inner)), + ScrollTxType::Eip1559 => ScrollReceipt::Eip1559(into_scroll_receipt(inner)), + ScrollTxType::Eip7702 => ScrollReceipt::Eip7702(into_scroll_receipt(inner)), + ScrollTxType::L1Message => ScrollReceipt::L1Message(inner), + } + } +}
diff --git reth/crates/scroll/evm/src/withdraw_root.rs scroll-reth/crates/scroll/evm/src/withdraw_root.rs new file mode 100644 index 0000000000000000000000000000000000000000..4775b889b55af2eb1f960eac6c8a73f4804acced --- /dev/null +++ scroll-reth/crates/scroll/evm/src/withdraw_root.rs @@ -0,0 +1,99 @@ +use alloy_primitives::{address, Address, U256}; +use revm::{database::State, Database}; + +const L2_MESSAGE_QUEUE_ADDRESS: Address = address!("0x5300000000000000000000000000000000000000"); +const WITHDRAW_TRIE_ROOT_SLOT: U256 = U256::ZERO; + +/// Instance that implements the trait can load the `L2MessageQueue` withdraw root in state. +pub trait LoadWithdrawRoot<DB: Database> { + /// Load the withdrawal root. + fn load_withdraw_root(&mut self) -> Result<(), DB::Error>; +} + +impl<DB: Database> LoadWithdrawRoot<DB> for State<DB> { + fn load_withdraw_root(&mut self) -> Result<(), DB::Error> { + // we load the account in cache and query the storage slot. The storage slot will only be + // loaded from database if it is not already know. + self.load_cache_account(L2_MESSAGE_QUEUE_ADDRESS)?; + let _ = revm::Database::storage(self, L2_MESSAGE_QUEUE_ADDRESS, WITHDRAW_TRIE_ROOT_SLOT); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{collections::HashMap, convert::Infallible}; + + use alloy_primitives::B256; + use revm::{bytecode::Bytecode, state::AccountInfo}; + use revm_primitives::{StorageKey, StorageValue}; + + #[derive(Default)] + struct InMemoryDb { + pub accounts: HashMap<Address, AccountInfo>, + pub storage: HashMap<(Address, U256), U256>, + } + + impl Database for InMemoryDb { + type Error = Infallible; + + fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> { + Ok(self.accounts.get(&address).cloned()) + } + + fn code_by_hash(&mut self, _code_hash: B256) -> Result<Bytecode, Self::Error> { + Ok(Default::default()) + } + + fn storage( + &mut self, + address: Address, + index: StorageKey, + ) -> Result<StorageValue, Self::Error> { + Ok(self.storage.get(&(address, index)).copied().unwrap_or_default()) + } + + fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> { + Ok(Default::default()) + } + } + + #[test] + fn test_should_load_withdraw_root() -> eyre::Result<()> { + // init db + let mut db = InMemoryDb::default(); + + // load L2 message queue contract + let withdraw_root = U256::random(); + db.accounts.insert(L2_MESSAGE_QUEUE_ADDRESS, Default::default()); + db.storage.insert((L2_MESSAGE_QUEUE_ADDRESS, U256::ZERO), withdraw_root); + + let mut state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + + assert!(state + .cache + .accounts + .get(&L2_MESSAGE_QUEUE_ADDRESS) + .map(|acc| acc.storage_slot(WITHDRAW_TRIE_ROOT_SLOT)) + .is_none()); + + // load root + state.load_withdraw_root()?; + + assert_eq!( + state + .cache + .accounts + .get(&L2_MESSAGE_QUEUE_ADDRESS) + .unwrap() + .storage_slot(WITHDRAW_TRIE_ROOT_SLOT) + .unwrap(), + withdraw_root + ); + + Ok(()) + } +}
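`LoadWithdrawRoot` exists so callers can warm the `L2MessageQueue` withdraw-trie-root slot before it is read back from the state cache. A usage sketch mirroring the test above (field and method names are taken from that test; the `LoadWithdrawRoot` trait must be in scope for the call to resolve):

```rust
use alloy_primitives::{address, Address, U256};
use revm::{database::State, Database};

const L2_MESSAGE_QUEUE: Address = address!("0x5300000000000000000000000000000000000000");

/// Preload the slot, then read the withdraw trie root (slot 0 of the
/// L2MessageQueue predeploy) straight from the cached account.
fn cached_withdraw_root<DB: Database>(state: &mut State<DB>) -> Result<Option<U256>, DB::Error> {
    // Requires `LoadWithdrawRoot` (defined above) to be imported.
    state.load_withdraw_root()?;
    Ok(state
        .cache
        .accounts
        .get(&L2_MESSAGE_QUEUE)
        .and_then(|account| account.storage_slot(U256::ZERO)))
}
```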
diff --git reth/crates/scroll/hardforks/Cargo.toml scroll-reth/crates/scroll/hardforks/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..bd69a2a489a6663f42d2a38975dba913856252f5 --- /dev/null +++ scroll-reth/crates/scroll/hardforks/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "reth-scroll-forks" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Scroll hardforks used in reth" + +[lints] +workspace = true + +[dependencies] +# reth +reth-ethereum-forks.workspace = true + +# ethereum +scroll-alloy-hardforks.workspace = true +alloy-chains.workspace = true +alloy-primitives.workspace = true + +# io +serde = { workspace = true, optional = true } + +# misc +auto_impl.workspace = true +once_cell.workspace = true + +[features] +default = ["std"] +std = [ + "alloy-primitives/std", + "once_cell/std", + "serde?/std", + "alloy-chains/std", + "reth-ethereum-forks/std", + "scroll-alloy-hardforks/std", +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-primitives/serde", + "reth-ethereum-forks/serde", + "scroll-alloy-hardforks/serde", +]
diff --git reth/crates/scroll/hardforks/docs/hardforks.md scroll-reth/crates/scroll/hardforks/docs/hardforks.md new file mode 100644 index 0000000000000000000000000000000000000000..bf2adf9bd84c20a8372d37357de9bbc4fa481815 --- /dev/null +++ scroll-reth/crates/scroll/hardforks/docs/hardforks.md @@ -0,0 +1,433 @@ +--- +section: technology +date: Last Modified +title: "Scroll Upgrades" +lang: "en" +permalink: "technology/overview/scroll-upgrades" +--- + +As the team continues to progress on Scroll's roadmap, we will be upgrading the Scroll network to include new features +and improvements. + +The following contracts are used to initiate upgrades and execute upgrades after the two-week timelock period: + +| Contract | Network | Address | +|--------------------|----------|---------------------------------------------------------------------------------------------------------------------------| +| L1 Scroll Multisig | Ethereum | [`0xEfc9D1096fb65c832207E5e7F13C2D1102244dbe`](https://etherscan.io/address/0xEfc9D1096fb65c832207E5e7F13C2D1102244dbe) | +| L1 Timelock | Ethereum | [`0x1A658B88fD0a3c82fa1a0609fCDbD32e7dd4aB9C`](https://etherscan.io/address/0x1A658B88fD0a3c82fa1a0609fCDbD32e7dd4aB9C) | +| L2 Scroll Multisig | Scroll | [`0xEfc9D1096fb65c832207E5e7F13C2D1102244dbe`](https://scrollscan.com/address/0xEfc9D1096fb65c832207E5e7F13C2D1102244dbe) | +| L2 Timelock | Scroll | [`0xf6069DB81239E5194bb53f83aF564d282357bc99`](https://scrollscan.com/address/0xf6069DB81239E5194bb53f83aF564d282357bc99) | + +You can join our [Telegram channel for technical updates](https://t.me/scroll_tech_updates), which includes future +upgrade announcements and on-chain operation events. + +## `DarwinV2` Upgrade + +### Overview + +During internal testing, we identified that blocks may not always be compressible under certain conditions, which leads +to unprovable chunks and batches. +To fix this issue, a minor upgrade has been conducted so that uncompressed blobs will be enabled when this special case +is detected. + +### Timeline + +As this is a security related patch, we bypassed the 7-day timelock mechanism. + +- **Scroll Sepolia**: August 28th, 2024 +- **Scroll Mainnet**: September 2nd, 2024 + +### Compatibility + +#### Sequencer and Follower Nodes (l2geth) + +The new node version is `v5.7.0`. See +the [release notes](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.7.0) for more information. + +This upgrade does not change Scroll's state transition function, so it is backward compatible. However, the format of +the batch data committed to Ethereum changes. As a result, nodes that enabled rollup verification (`--rollup.verify`) +must upgrade to be able to follow the chain. + +#### Dapps and Indexers + +A change has been implemented to Scroll Mainnet to enhance sequencer throughput, which adjusted the maximum reorg depth +to 17 blocks. Previously, the system performed thorough capacity checks within the signer thread to determine whether +transactions exceed the circuit limit. While this ensures that all transactions within a block are compliant, it also +requires additional CPU resources. +We introduced a new circuit capacity checking scheme on Mainnet. The sequencer thread now will continue to perform +capacity checks, but in a more approximate manner. In parallel, 16 worker threads will accurately verify the capacity of +previous blocks. As a result, a reorg could occur with a maximum depth of 17 blocks, although the likelihood of this is +low. 
+ +For indexers, the `BatchHeader` version has been upgraded to 4. This is backward compatible (the only exception is for +developers decoding the blob payload, which has changed slightly). + +## Darwin Upgrade + +### Overview + +This upgrade will reduce gas fees by 34% by using a single aggregated proof for multiple batches, eliminating the need +to finalize each batch individually. + +- Darwin uses a new [V3 batch codec](https://github.com/scroll-tech/da-codec/tree/main/encoding/codecv3). +- In addition to the previous notions of `chunk` and `batch`, we have introduced a new concept called `bundle`. + - `Chunk`: A unit of zkEVM proving, consisting of a list of L2 blocks. + - `Batch`: A collection of chunks encoded into one EIP-4844 blob, serving as the unit of Data Availability. + - `Bundle`: A series of batches that functions as the unit of finalization. + + The main difference compared to Curie is that Scroll will now finalize multiple batches using a single aggregated + bundle proof. + +- The on-chain bundle proof verifier uses a new public input layout. + +### Timeline + +- **Scroll Sepolia** + - Network Upgrade: August 14th, 2024 +- **Scroll Mainnet** + - Upgrade Initiation: August 5th, 2024 + - Timelock Completion & Upgrade: August 21st, 2024 + +### Technical Details + +#### Contract Changes + +*Note: Since the previous Curie upgrade, we have migrated the Scroll contracts to a new repo +at [scroll-contracts](https://github.com/scroll-tech/scroll-contracts).* + +The code changes for this upgrade are implemented in [this PR](https://github.com/scroll-tech/scroll-contracts/pull/4). +The key changes are as follows: + +- We have introduced a new `BatchHeaderV3Codec`. +- We have changed how messages are processed in the `L1MessageQueue` contract. Prior to Darwin, we would process + messages when a batch is finalized. After Darwin, most of this processing is moved to the commit step. +- We have introduced a new public input format for bundle proofs. This is implemented in a new contract + `IZkEvmVerifierV2`, which is in turn added to `MultipleVersionRollupVerifier`. +- In the `ScrollChain` contract `version=3` batches will now be committed through a new function called + `commitBatchWithBlobProof`. Bundles will be finalized using a new function called `finalizeBundleWithProof`. + +See the contract [release notes](https://github.com/scroll-tech/scroll-contracts/releases/tag/v1.0.0) for more +information. + +#### Node Changes + +The new node version is `v5.6.0`. See +the [release notes](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.6.0) for more information. + +The main changes are: + +- Implementation of timestamp-based hard forks. +- Processing V3 batch codec in rollup-verifier. + +#### zkEVM circuit changes + +The new version of zkevm circuits is `v0.12.0`. +See [here](https://github.com/scroll-tech/zkevm-circuits/releases/tag/v0.12.0) for the release log. + +We have introduced a `RecursionCircuit` that will bundle multiple sequential batches by recursively aggregating the +SNARKs from the `BatchCircuit` (previously `AggregationCircuit`). The previously 5 layer proving system is now 7 layers +as we introduce: + +- 6th Layer (layer5): `RecursionCircuit` that recursively aggregates `BatchCircuit` SNARKs. +- 7th Layer (layer6): `CompressionCircuit` that compresses the `RecursionCircuit` SNARK and produce an EVM-verifiable + validity proof. 
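To make the `chunk`/`batch`/`bundle` hierarchy introduced above concrete, here is an illustrative-only sketch of the containment relationship; the real encoding lives in [da-codec](https://github.com/scroll-tech/da-codec) and uses different layouts:

```rust
/// Illustrative-only types mirroring the hierarchy described above.
struct Chunk {
    /// A chunk is the unit of zkEVM proving: block numbers stand in for full block payloads here.
    blocks: Vec<u64>,
}

struct Batch {
    /// A batch groups chunks into one EIP-4844 blob (the unit of data availability).
    chunks: Vec<Chunk>,
}

struct Bundle {
    /// A bundle groups batches and is finalized on L1 with a single aggregated proof.
    batches: Vec<Batch>,
}

fn main() {
    let bundle = Bundle {
        batches: vec![Batch { chunks: vec![Chunk { blocks: vec![1, 2, 3] }] }],
    };
    let total_blocks: usize = bundle
        .batches
        .iter()
        .flat_map(|batch| &batch.chunks)
        .map(|chunk| chunk.blocks.len())
        .sum();
    println!("bundle finalizes {total_blocks} blocks with one proof");
}
```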
+ +The public input to the `BatchCircuit` is now context-aware of the “previous” `batch`, which allows us to implement the +recursion scheme we adopted ( +described [here](https://scrollzkp.notion.site/Upgrade-4-Darwin-Documentation-05a3ecb59e9d4f288254701f8c888173) +in-depth). + +#### Audits + +- TrailofBits: coming soon! + +### Compatibility + +#### Sequencer and Follower Nodes (l2geth) + +This upgrade does not alter the state transition function and is therefore backward-compatible. However, we strongly +recommend node operators to upgrade to [v5.6.0](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.6.0). + +#### Dapps and Indexers + +There are some major changes to how we commit and finalize batches after Darwin. + +- Batches will be encoded using the + new [V3 batch codec](https://github.com/scroll-tech/da-codec/tree/main/encoding/codecv3). This version adds two new + fields: + 1. `lastBlockTimestamp` (the timestamp of the last block in this batch). + 2. `blobDataProof` (the KZG challenge point evaluation proof). + + This version removes `skippedL1MessageBitmap`. There will be no changes to how the blob data is encoded and + compressed. +- Batches will be committed using the `commitBatchWithBlobProof` function (instead of the previous `commitBatch`). + + New function signature: + + ```solidity + function commitBatchWithBlobProof(uint8 _version, bytes calldata _parentBatchHeader, bytes[] memory _chunks, bytes calldata _skippedL1MessageBitmap, bytes calldata _blobDataProof) + ``` + +- Batches will be finalized using the `finalizeBundleWithProof` function (instead of the previous + `finalizeBatchWithProof4844`). + + New function signature: + + ```solidity + function finalizeBundleWithProof(bytes calldata _batchHeader, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata _aggrProof) + ``` + +- The semantics of the `FinalizeBatch` event will change: It will now mean that all batches between the last finalized + batch and the event’s `_batchIndex` have been finalized. The event’s stateRoot and withdrawRoot values belong to the + last finalized batch in the bundle. Finalized roots for intermediate batches are no longer available. + + The semantics of the `CommitBatch` and `RevertBatch` events will not change. + +Recommendations: + +- Indexers that decode committed batch data should be adjusted to use the new codec and the new function signature. +- Indexers that track batch finalization status should be adjusted to consider the new event semantics. + +## Curie Upgrade + +### Overview + +This significant upgrade will reduce gas fees on the Scroll chain by 1.5x. Highlights include: + +- Compresses the data stored in blobs using the [zstd](https://github.com/scroll-tech/da-codec/tree/main/libzstd) + algorithm. This compression reduces the data size, allowing each blob to store more transactions, thereby reducing + data availability cost per transaction. +- Adopts a modified version of the EIP-1559 pricing model which is compatible with the EIP-1559 transaction interface, + bringing benefits such as more accurate transaction pricing and a more predictable and stable fee structure. +- Support for new EVM opcodes `TLOAD`, `TSTORE`, and `MCOPY`. Users can safely use the latest Solidity compiler version + `0.8.26` to build the contracts. +- Introduces a dynamic block time. During periods of traffic congestion, a block will be packed when the number of + transactions reaches the circuit limit instead of waiting for the 3-second interval. 
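The Contract Changes below replace the `L1GasPriceOracle` data fee formula with `l1BaseFee * commitScalar + len(txRlp) * l1BlobBaseFee * blobScalar`. A minimal sketch of that arithmetic; all values are placeholders, and the real scalars are read from the oracle contract:

```rust
/// Sketch of the post-Curie L1 data fee formula described under "Contract Changes" below.
fn l1_data_fee(
    l1_base_fee: u128,
    l1_blob_base_fee: u128,
    commit_scalar: u128,
    blob_scalar: u128,
    tx_rlp_len: u128,
) -> u128 {
    // new formula: l1BaseFee * commitScalar + len(txRlp) * l1BlobBaseFee * blobScalar
    l1_base_fee * commit_scalar + tx_rlp_len * l1_blob_base_fee * blob_scalar
}

fn main() {
    // illustrative numbers only
    let fee = l1_data_fee(30_000_000_000, 1, 100, 50, 500);
    println!("estimated L1 data fee: {fee} wei");
}
```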
+ +### Timeline + +- **Scroll Sepolia** + - Network Upgrade: June 17th, 2024 +- **Scroll Mainnet** + - Upgrade Initiation: June 20th, 2024 + - Timelock Completion & Upgrade: July 3rd, 2024 + +### Technical Details + +#### Contract Changes + +The code changes for this upgrade are documented in the following PRs: + +- [Accept compressed batches](https://github.com/scroll-tech/scroll/pull/1317) +- [Update `L1GasPriceOracle`](https://github.com/scroll-tech/scroll/pull/1343) +- [Change `MAX_COMMIT_SCALAR` and `MAX_BLOB_SCALAR` to 1e18](https://github.com/scroll-tech/scroll/pull/1354) +- [Remove batch index check when updating a verifier](https://github.com/scroll-tech/scroll/pull/1372) + +The main changes are as follows: + +- The rollup contract (`ScrollChain`) will now accept batches with both versions 1 and + 2. [Version 1](https://github.com/scroll-tech/da-codec/tree/main/encoding/codecv1) is used for uncompressed blobs ( + pre-Curie), while [version 2](https://github.com/scroll-tech/da-codec/tree/main/encoding/codecv2) is used for + compressed blobs (post-Curie). +- The `L1GasPriceOracle` contract will be updated to change the data fee formula to account for blob DA, providing a + more accurate estimation of DA costs: + - Original formula: `(l1GasUsed(txRlp) + overhead) * l1BaseFee * scalar` + - New formula: `l1BaseFee * commitScalar + len(txRlp) * l1BlobBaseFee * blobScalar` + +#### Node Changes + +The new node version is `v5.5.0`. See +the [release notes](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.5.0) for the list of changes. + +#### zkEVM circuit changes + +The new version of zkevm circuits is `v0.11.4`. +See [here](https://github.com/scroll-tech/zkevm-circuits/releases/tag/v0.11.4) for the release log. + +#### Audits + +- TrailofBits: coming soon! +- [Zellic](https://github.com/Zellic/publications/blob/master/Scroll%20zkEVM%20-%20Zellic%20Audit%20Report.pdf) + +### Compatibility + +#### Sequencer and Follower Nodes (l2geth) + +This upgrade is a hard fork, introducing the `TLOAD`, `TSTORE`, and `MCOPY` opcodes. Operators running an `l2geth` node +are required to upgrade before the hard fork block. For more information, see +the [node release note](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.4.2). + +#### Dapps and Indexers + +For dApps, this upgrade is backward compatible. Developers should adjust the gas fee settings to incorporate the +EIP-1559 pricing model. Note that dApps can no longer rely on the fixed 3-second block time in the application logic. + +For indexers, the [data format](https://docs.scroll.io/en/technology/chain/rollup/#codec) remains the same. The will be +however changes to the data content: + +- The `version` field in `BatchHeader` will be changed to 2 since Curie block. +- The data stored in blob will be compressed and can be decompressed + by [zstd v1.5.6](https://github.com/facebook/zstd/releases/tag/v1.5.6). + +## Bernoulli Upgrade + +### Overview + +This upgrade features a significant reduction in transaction costs by introducing support for EIP-4844 data blobs and +supporting the SHA2-256 precompile. 
+ +### Timeline + +- **Scroll Sepolia** + - Network Upgrade: April 15th, 2024 +- **Scroll Mainnet** + - Upgrade Initiation: April 15th, 2024 + - Timelock Completion & Upgrade: April 29th, 2024 + +### Technical Details + +#### Contract changes + +The contract changes for this upgrade are in [this PR](https://github.com/scroll-tech/scroll/pull/1179), along with the +audit +fixes [here](https://github.com/scroll-tech/scroll/pulls?q=is%3Apr+created%3A2024-04-10..2024-04-11+fix+in%3Atitle+label%3Abug). +The main changes are as follows: + +- `ScrollChain` now accepts batches with either calldata or blob encoding in `commitBatch`. +- `ScrollChain` now supports finalizing blob-encoded batches through `finalizeBatchWithProof4844`. +- `MultipleVersionRollupVerifier` can now manage different on-chain verifiers for each batch encoding version. + +#### Node changes + +The new node version is `v5.3.0`. See [here](https://github.com/scroll-tech/go-ethereum/releases/tag/scroll-v5.3.0) for +the release log. + +#### zkEVM circuit changes + +The new version of zkevm circuits is `v0.10.3`. +See [here](https://github.com/scroll-tech/zkevm-circuits/releases/tag/v0.10.3) for the release log. + +#### Audits + +- [OpenZeppelin](https://blog.openzeppelin.com/scroll-eip-4844-support-audit) +- [TrailofBits](https://github.com/trailofbits/publications/blob/master/reviews/2024-04-scroll-4844-blob-securityreview.pdf) + +### Compatibility + +#### Sequencer and follower nodes (l2geth) + +This upgrade is a hard fork as it introduces the new blob data type and the SHA2-256 precompiled contract. Operators +running an `l2geth` node are required to upgrade before the hard fork block. See +the [node releases](https://github.com/scroll-tech/go-ethereum/releases) for more information. + +#### Indexers and Bridges + +This upgrade changes the format that Scroll uses to publish data to Ethereum. Projects that rely on this data should +carefully review [the new data format](/en/technology/chain/rollup/#codec), and check whether their decoders need to be +adjusted. A summary of the new format: + +- The format of [ + `BlockContext`](https://github.com/scroll-tech/scroll/blob/5362e28f744093495c1c09a6b68fc96a3264278b/common/types/encoding/codecv1/codecv1.go#L125) + will not change. +- `Chunks` + will [no longer include](https://github.com/scroll-tech/scroll/blob/5362e28f744093495c1c09a6b68fc96a3264278b/common/types/encoding/codecv1/codecv1.go#L162) + the L2 transaction data. This will instead + be [stored in a blob](https://github.com/scroll-tech/scroll/blob/5362e28f744093495c1c09a6b68fc96a3264278b/common/types/encoding/codecv1/codecv1.go#L284) + attached to the `commitBatch` transaction. +- `BatchHeader` now contains one new field, [ + `BlobVersionedHash`](https://github.com/scroll-tech/scroll/blob/5362e28f744093495c1c09a6b68fc96a3264278b/common/types/encoding/codecv1/codecv1.go#L405). + +#### Provers + +This upgrade involves a breaking change in [zkevm-circuits](https://github.com/scroll-tech/zkevm-circuits). Operators +running a prover node are required to upgrade. + +## Bridge Upgrade + +### Overview + +To reduce bridging costs, we implemented several gas optimizations on our bridge and rollup contract suite. The +optimization techniques used include the following: + +- We will now use constants to store some companion contract addresses, instead of using storage variables. This is + possible since these values should (almost) never change. With this change we can save on a few storage load + operations. 
+- We updated the intrinsic gas estimation in `L1MessageQueue` to use a simple upper bound instead of an exact + calculation. The two results will be similar for most bridge transactions but the new implementation is significantly + cheaper. +- We merged two contracts `L1MessageQueue` and `L2GasPriceOracle` to save on call costs from one contract to the other. + +### Timeline + +- **Scroll Sepolia:** + - Network Upgrade: January 19, 2024 +- **Scroll Mainnet:** + - Upgrade Initiation: February 7, 2024 + - Timelock Completion & Upgrade: February 21, 2024 + +### Technical Details + +#### Code Changes + +- [Bridge Cost Optimization](https://github.com/scroll-tech/scroll/pull/1011) +- [Audit Fixes](https://github.com/scroll-tech/scroll/pulls?q=OZ+is%3Apr+created%3A2024-01-27..2024-02-10) +- [Previously deployed version](https://github.com/scroll-tech/scroll/tree/ff380141a8cbcc214dc65f17ffa44faf4be646b6) ( + commit `ff380141a8cbcc214dc65f17ffa44faf4be646b6`) +- [Version deployed](https://github.com/scroll-tech/scroll/tree/6030927680a92d0285c2c13e6bb27ed27d1f32d1) (commit + `6030927680a92d0285c2c13e6bb27ed27d1f32d1`) + +#### Audits + +- [OpenZeppelin](https://blog.openzeppelin.com/scroll-bridge-gas-optimizations-audit) + +#### List of Changes + +**Changes to L1 contracts:** + +- In `ScrollChain`, change `messageQueue` and `verifier` to `immutable`. +- In `L1ScrollMessenger`, change `counterpart`, `rollup`, and `messageQueue` to `immutable`. +- In all token gateways, change `counterpart`, `router`, and `messenger` to `immutable`. +- Merge `L1MessageQueue` and `L2GasPriceOracle` into a single contract `L1MessageQueueWithGasPriceOracle` (deployed on + the same address as the previous `L1MessageQueue`). In this contract, we also change `messenger` and `scrollChain` to + `immutable`, and simplify `calculateIntrinsicGasFee`. + +**Changes to L2 contracts:** + +- In `L2ScrollMessenger`, change `counterpart` to `immutable`. +- In all token gateways, change `counterpart`, `router`, and `messenger` to `immutable`. + +**Contracts affected:** + +- **L1:** `L1MessageQueue`, `L2GasPriceOracle`, `ScrollChain`, `L1WETHGateway`, `L1StandardERC20Gateway`, + `L1GatewayRouter`, `L1ScrollMessenger`, `L1CustomERC20Gateway`, `L1ERC721Gateway`, `L1ERC1155Gateway`. +- **L2:** `L2ScrollMessenger`, `L2WETHGateway`, `L2StandardERC20Gateway`, `L2GatewayRouter`, `L2CustomERC20Gateway`, + `L2ERC721Gateway`, `L2ERC1155Gateway`. + +#### Compatibility + +##### Sequencer and follower nodes (l2geth) + +Operators running an `l2geth` node do not need to upgrade. The changes in this upgrade will not affect `l2geth`. + +##### Dapps and indexers + +Dapps and indexers (and similar off-chain infrastructure) that query contracts or rely on contract interfaces would, in +most cases, not need to be changed. The majority of the contract changes are internal and/or backward compatible. + +If your application depends on [ +`L2GasPriceOracle`](https://etherscan.io/address/0x987e300fDfb06093859358522a79098848C33852) to monitor how Scroll keeps +track of the L2 gas price on L1, from the upgrade block number you will need to start monitoring [ +`L1MessageQueueWithGasPriceOracle`](https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B). + +The original gas price oracle contract will be deprecated: it will no longer be updated or used by the Scroll bridge. 
+ +- Ethereum: + - `L2GasPriceOracle`: [ + `0x987e300fDfb06093859358522a79098848C33852`](https://etherscan.io/address/0x987e300fDfb06093859358522a79098848C33852) + - `L1MessageQueueWithGasPriceOracle`: [ + `0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B`](https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B) +- Sepolia: + - `L2GasPriceOracle`: [ + `0x247969F4fad93a33d4826046bc3eAE0D36BdE548`](https://sepolia.etherscan.io/address/0x247969F4fad93a33d4826046bc3eAE0D36BdE548) + - `L1MessageQueueWithGasPriceOracle`: [ + `0xF0B2293F5D834eAe920c6974D50957A1732de763`](https://sepolia.etherscan.io/address/0xF0B2293F5D834eAe920c6974D50957A1732de763) \ No newline at end of file
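For off-chain monitors that need to switch from `L2GasPriceOracle` to `L1MessageQueueWithGasPriceOracle`, a minimal sketch of selecting which contract to read per L1 block; the upgrade block constant is a placeholder, and the addresses are the Mainnet ones listed above:

```rust
/// Mainnet addresses listed above.
const L2_GAS_PRICE_ORACLE: &str = "0x987e300fDfb06093859358522a79098848C33852";
const L1_MESSAGE_QUEUE_WITH_GAS_PRICE_ORACLE: &str =
    "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B";
/// Placeholder: substitute the actual L1 block at which the Bridge Upgrade executed.
const UPGRADE_BLOCK: u64 = 0;

/// Picks the oracle contract an off-chain monitor should read at a given L1 block.
fn oracle_address(l1_block: u64) -> &'static str {
    if l1_block >= UPGRADE_BLOCK {
        L1_MESSAGE_QUEUE_WITH_GAS_PRICE_ORACLE
    } else {
        L2_GAS_PRICE_ORACLE
    }
}

fn main() {
    println!("monitoring {}", oracle_address(19_000_000));
}
```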
diff --git reth/crates/scroll/hardforks/src/lib.rs scroll-reth/crates/scroll/hardforks/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..29427ee063200b3cafb749d823d5be43caf26c99 --- /dev/null +++ scroll-reth/crates/scroll/hardforks/src/lib.rs @@ -0,0 +1,98 @@ +//! Scroll-Reth hard forks. + +#![cfg_attr(not(feature = "std"), no_std)] +#![doc = include_str!("../docs/hardforks.md")] +#[cfg(not(feature = "std"))] +extern crate alloc as std; + +use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; + +// Re-export scroll-alloy-hardforks types. +pub use scroll_alloy_hardforks::{ScrollHardfork, ScrollHardforks}; + +#[cfg(not(feature = "std"))] +use once_cell::sync::Lazy as LazyLock; +#[cfg(feature = "std")] +use std::sync::LazyLock; +use std::vec; + +/// Scroll mainnet hardforks +pub static SCROLL_MAINNET_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| { + ChainHardforks::new(vec![ + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Dao.boxed(), ForkCondition::Never), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Never), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Never), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Never), + (ScrollHardfork::Archimedes.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Bernoulli.boxed(), ForkCondition::Block(5220340)), + (ScrollHardfork::Curie.boxed(), ForkCondition::Block(7096836)), + (ScrollHardfork::Darwin.boxed(), ForkCondition::Timestamp(1724227200)), + (ScrollHardfork::DarwinV2.boxed(), ForkCondition::Timestamp(1725264000)), + (ScrollHardfork::Euclid.boxed(), ForkCondition::Timestamp(1744815600)), + (ScrollHardfork::EuclidV2.boxed(), ForkCondition::Timestamp(1745305200)), + (ScrollHardfork::Feynman.boxed(), ForkCondition::Timestamp(1755576000)), + (ScrollHardfork::Galileo.boxed(), ForkCondition::Timestamp(1765868400)), + (ScrollHardfork::GalileoV2.boxed(), ForkCondition::Timestamp(1766041200)), + ]) +}); + +/// Scroll sepolia hardforks +pub static SCROLL_SEPOLIA_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| { + ChainHardforks::new(vec![ + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Archimedes.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Bernoulli.boxed(), ForkCondition::Block(3747132)), + (ScrollHardfork::Curie.boxed(), ForkCondition::Block(4740239)), + 
(ScrollHardfork::Darwin.boxed(), ForkCondition::Timestamp(1723622400)), + (ScrollHardfork::DarwinV2.boxed(), ForkCondition::Timestamp(1724832000)), + (ScrollHardfork::Euclid.boxed(), ForkCondition::Timestamp(1741680000)), + (ScrollHardfork::EuclidV2.boxed(), ForkCondition::Timestamp(1741852800)), + (ScrollHardfork::Feynman.boxed(), ForkCondition::Timestamp(1753167600)), + (ScrollHardfork::Galileo.boxed(), ForkCondition::Timestamp(1764054000)), + (ScrollHardfork::GalileoV2.boxed(), ForkCondition::Timestamp(1764831600)), + ]) +}); + +/// Dev hardforks +pub static DEV_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| { + ChainHardforks::new(vec![ + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Archimedes.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::Bernoulli.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Curie.boxed(), ForkCondition::Block(0)), + (ScrollHardfork::Darwin.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::DarwinV2.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::Euclid.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::EuclidV2.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::Feynman.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::Galileo.boxed(), ForkCondition::Timestamp(0)), + (ScrollHardfork::GalileoV2.boxed(), ForkCondition::Timestamp(0)), + ]) +});
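A minimal usage sketch for the fork lists above, assuming `ChainHardforks::fork` from `reth_ethereum_forks` returns the `ForkCondition` registered for a given hardfork:

```rust
use reth_ethereum_forks::ForkCondition;
use reth_scroll_forks::{ScrollHardfork, SCROLL_MAINNET_HARDFORKS};

/// Returns whether a Scroll hardfork is active on mainnet at the given block and timestamp.
fn is_active(fork: ScrollHardfork, block: u64, timestamp: u64) -> bool {
    match SCROLL_MAINNET_HARDFORKS.fork(fork) {
        // block-activated forks (e.g. Bernoulli, Curie on mainnet)
        ForkCondition::Block(activation) => block >= activation,
        // timestamp-activated forks (Darwin and later)
        ForkCondition::Timestamp(activation) => timestamp >= activation,
        // anything else (e.g. `Never`) is treated as inactive here
        _ => false,
    }
}

fn main() {
    // Curie activates at mainnet block 7,096,836 per the list above.
    assert!(is_active(ScrollHardfork::Curie, 7_096_836, 0));
}
```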
diff --git reth/crates/scroll/node/Cargo.toml scroll-reth/crates/scroll/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..3ee16294467c65d5496ab267898c3aa3dee8a058 --- /dev/null +++ scroll-reth/crates/scroll/node/Cargo.toml @@ -0,0 +1,119 @@ +[package] +name = "reth-scroll-node" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-db = { workspace = true, features = ["scroll-alloy-traits"] } +reth-engine-local = { workspace = true, features = ["scroll-alloy-traits"] } +reth-eth-wire-types.workspace = true +reth-evm = { workspace = true, features = ["scroll-alloy-traits"] } +reth-e2e-test-utils = { workspace = true, optional = true } +reth-network.workspace = true +reth-node-api.workspace = true +reth-node-core = { workspace = true, optional = true } +reth-node-types.workspace = true +reth-node-builder.workspace = true +reth-payload-builder.workspace = true +reth-primitives = { workspace = true, features = ["c-kzg"] } +reth-primitives-traits.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types = { workspace = true, optional = true } +reth-tasks = { workspace = true, optional = true } +reth-tracing.workspace = true +reth-transaction-pool.workspace = true +reth-trie-db.workspace = true + +# revm +revm = { workspace = true, features = ["c-kzg"] } + +# alloy +alloy-consensus.workspace = true +alloy-genesis = { workspace = true, optional = true } +alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true + +# scroll-reth +reth-scroll-chainspec.workspace = true +reth-scroll-consensus.workspace = true +reth-scroll-engine-primitives.workspace = true +reth-scroll-evm.workspace = true +reth-scroll-payload.workspace = true +reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } +reth-scroll-rpc.workspace = true +reth-scroll-txpool.workspace = true + +# scroll-alloy +scroll-alloy-consensus.workspace = true +scroll-alloy-evm.workspace = true +scroll-alloy-hardforks.workspace = true +scroll-alloy-network.workspace = true +scroll-alloy-rpc-types.workspace = true +scroll-alloy-rpc-types-engine.workspace = true + +# misc +clap.workspace = true +eyre.workspace = true +serde_json = { workspace = true, optional = true } +tracing.workspace = true +tokio.workspace = true + +[dev-dependencies] +reth-scroll-node = { workspace = true, features = ["test-utils"] } +reth-db.workspace = true +reth-node-core.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } +reth-tasks.workspace = true + +alloy-primitives.workspace = true +scroll-alloy-consensus.workspace = true +alloy-consensus.workspace = true + +[features] +default = ["reth-codec", "scroll-alloy-traits"] +reth-codec = ["reth-scroll-primitives/reth-codec"] +test-utils = [ + "dep:alloy-genesis", + "dep:reth-e2e-test-utils", + "dep:reth-node-core", + "dep:reth-rpc-server-types", + "dep:reth-tasks", + "dep:serde_json", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-node-builder/test-utils", + 
"reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils", + "reth-scroll-payload/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "reth-db/test-utils", + "reth-revm/test-utils", + "reth-scroll-node/test-utils", +] +scroll-alloy-traits = [ + "reth-db/scroll-alloy-traits", + "reth-evm/scroll-alloy-traits", + "reth-primitives-traits/scroll-alloy-traits", + "reth-scroll-node/scroll-alloy-traits", + "reth-engine-local/scroll-alloy-traits", +] +js-tracer = ["reth-scroll-rpc/js-tracer"]
diff --git reth/crates/scroll/node/src/addons.rs scroll-reth/crates/scroll/node/src/addons.rs new file mode 100644 index 0000000000000000000000000000000000000000..a30d24d45b16e217f70cdb87f7cc90e100710537 --- /dev/null +++ scroll-reth/crates/scroll/node/src/addons.rs @@ -0,0 +1,252 @@ +use crate::{ + builder::{engine::ScrollEngineValidatorBuilder, payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT}, + ScrollStorage, +}; +use reth_evm::{ConfigureEngineEvm, EvmFactory, EvmFactoryFor}; +use reth_node_api::{AddOnsContext, NodeAddOns, PayloadTypes}; +use reth_node_builder::{ + rpc::{ + BasicEngineApiBuilder, BasicEngineValidatorBuilder, EngineValidatorAddOn, EthApiBuilder, + Identity, RethRpcAddOns, RethRpcMiddleware, RpcAddOns, RpcHandle, + }, + FullNodeComponents, +}; +use reth_node_types::NodeTypes; +use reth_revm::context::BlockEnv; +use reth_rpc_eth_types::error::FromEvmError; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_engine_primitives::ScrollEngineTypes; +use reth_scroll_evm::ScrollNextBlockEnvAttributes; +use reth_scroll_primitives::ScrollPrimitives; +use reth_scroll_rpc::{ + eth::{ScrollEthApiBuilder, DEFAULT_MIN_SUGGESTED_PRIORITY_FEE}, + ScrollEthApiError, +}; +use revm::context::TxEnv; +use scroll_alloy_evm::ScrollTransactionIntoTxEnv; +use scroll_alloy_hardforks::ScrollHardforks; +use scroll_alloy_network::Scroll; +use std::marker::PhantomData; + +/// Marker trait for Scroll node types with standard engine, chain spec, and primitives. +pub trait ScrollNodeTypes: + NodeTypes<Payload = ScrollEngineTypes, ChainSpec: ScrollHardforks, Primitives = ScrollPrimitives> +{ +} + +/// Blanket impl for all node types that conform to the Scroll spec. +impl<N> ScrollNodeTypes for N where + N: NodeTypes< + Payload = ScrollEngineTypes, + ChainSpec: ScrollHardforks, + Primitives = ScrollPrimitives, + > +{ +} + +/// Add-ons for the Scroll follower node. +#[derive(Debug)] +pub struct ScrollAddOns<N, RpcMiddleWare = Identity> +where + N: FullNodeComponents<Types: ScrollNodeTypes>, + ScrollEthApiBuilder: EthApiBuilder<N>, +{ + /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers + /// and eth-api. + pub rpc_add_ons: RpcAddOns< + N, + ScrollEthApiBuilder, + ScrollEngineValidatorBuilder, + BasicEngineApiBuilder<ScrollEngineValidatorBuilder>, + BasicEngineValidatorBuilder<ScrollEngineValidatorBuilder>, + RpcMiddleWare, + >, +} + +impl<N> Default for ScrollAddOns<N, Identity> +where + N: FullNodeComponents<Types: ScrollNodeTypes>, + ScrollEthApiBuilder: EthApiBuilder<N>, +{ + fn default() -> Self { + Self::builder::<Scroll>().build() + } +} + +impl<N, RpcMiddleware> ScrollAddOns<N, RpcMiddleware> +where + N: FullNodeComponents<Types: ScrollNodeTypes>, + ScrollEthApiBuilder: EthApiBuilder<N>, +{ + /// Build a [`ScrollAddOns`] using [`ScrollAddOnsBuilder`]. 
+ pub fn builder<NetworkT>() -> ScrollAddOnsBuilder<NetworkT> { + ScrollAddOnsBuilder::default() + } +} + +impl<N, RpcMiddleware> NodeAddOns<N> for ScrollAddOns<N, RpcMiddleware> +where + N: FullNodeComponents< + Types: NodeTypes< + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + Storage = ScrollStorage, + Payload = ScrollEngineTypes, + >, + Evm: ConfigureEngineEvm< + <<N::Types as NodeTypes>::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, + >, + ScrollEthApiError: FromEvmError<N::Evm>, + EvmFactoryFor<N::Evm>: EvmFactory<Tx = ScrollTransactionIntoTxEnv<TxEnv>, BlockEnv = BlockEnv>, + RpcMiddleware: RethRpcMiddleware, +{ + type Handle = RpcHandle<N, <ScrollEthApiBuilder as EthApiBuilder<N>>::EthApi>; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result<Self::Handle> { + let Self { rpc_add_ons } = self; + rpc_add_ons.launch_add_ons_with(ctx, |_| Ok(())).await + } +} + +impl<N, RpcMiddleware> RethRpcAddOns<N> for ScrollAddOns<N, RpcMiddleware> +where + N: FullNodeComponents< + Types: NodeTypes< + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + Storage = ScrollStorage, + Payload = ScrollEngineTypes, + >, + Evm: ConfigureEngineEvm< + <<N::Types as NodeTypes>::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, + >, + ScrollEthApiError: FromEvmError<N::Evm>, + EvmFactoryFor<N::Evm>: EvmFactory<Tx = ScrollTransactionIntoTxEnv<TxEnv>, BlockEnv = BlockEnv>, + RpcMiddleware: RethRpcMiddleware, +{ + type EthApi = <ScrollEthApiBuilder as EthApiBuilder<N>>::EthApi; + + fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks<N, Self::EthApi> { + self.rpc_add_ons.hooks_mut() + } +} + +impl<N> EngineValidatorAddOn<N> for ScrollAddOns<N> +where + N: FullNodeComponents< + Types: NodeTypes< + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + Payload = ScrollEngineTypes, + >, + Evm: ConfigureEngineEvm< + <<N::Types as NodeTypes>::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, + >, + ScrollEthApiBuilder: EthApiBuilder<N>, +{ + type ValidatorBuilder = BasicEngineValidatorBuilder<ScrollEngineValidatorBuilder>; + + fn engine_validator_builder(&self) -> Self::ValidatorBuilder { + EngineValidatorAddOn::engine_validator_builder(&self.rpc_add_ons) + } +} + +/// A regular scroll evm and executor builder. +#[derive(Debug, Clone)] +#[non_exhaustive] +pub struct ScrollAddOnsBuilder<NetworkT, RpcMiddleware = Identity> { + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_url: Option<String>, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, + /// Maximum payload size + payload_size_limit: u64, + /// Marker for network types. + _nt: PhantomData<NetworkT>, + /// RPC middleware to use + rpc_middleware: RpcMiddleware, +} + +impl<NetworkT> Default for ScrollAddOnsBuilder<NetworkT> { + fn default() -> Self { + Self { + sequencer_url: None, + payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, + min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, + _nt: PhantomData, + rpc_middleware: Identity::new(), + } + } +} + +impl<NetworkT, RpcMiddleWare> ScrollAddOnsBuilder<NetworkT, RpcMiddleWare> { + /// With a [`reth_scroll_rpc::SequencerClient`]. 
+ pub fn with_sequencer(mut self, sequencer_client: Option<String>) -> Self { + self.sequencer_url = sequencer_client; + self + } + + /// With minimum suggested priority fee. + pub const fn with_min_suggested_priority_fee( + mut self, + min_suggested_priority_fee: u64, + ) -> Self { + self.min_suggested_priority_fee = min_suggested_priority_fee; + self + } + + /// With maximum payload size limit. + pub const fn with_payload_size_limit(mut self, payload_size_limit: u64) -> Self { + self.payload_size_limit = payload_size_limit; + self + } + + /// Configure the RPC middleware to use + pub fn with_rpc_middleware<T>(self, rpc_middleware: T) -> ScrollAddOnsBuilder<NetworkT, T> { + let Self { sequencer_url, min_suggested_priority_fee, payload_size_limit, _nt, .. } = self; + ScrollAddOnsBuilder { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + _nt, + rpc_middleware, + } + } +} + +impl<NetworkT, RpcMiddleWare> ScrollAddOnsBuilder<NetworkT, RpcMiddleWare> { + /// Builds an instance of [`ScrollAddOns`]. + pub fn build<N>(self) -> ScrollAddOns<N, RpcMiddleWare> + where + N: FullNodeComponents<Types: ScrollNodeTypes>, + ScrollEthApiBuilder: EthApiBuilder<N>, + { + let Self { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + rpc_middleware, + .. + } = self; + + ScrollAddOns { + rpc_add_ons: RpcAddOns::new( + ScrollEthApiBuilder::new() + .with_sequencer(sequencer_url) + .with_payload_size_limit(payload_size_limit) + .with_min_suggested_priority_fee(min_suggested_priority_fee), + ScrollEngineValidatorBuilder::default(), + BasicEngineApiBuilder::default(), + BasicEngineValidatorBuilder::default(), + rpc_middleware, + ), + } + } +}
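A sketch of wiring `ScrollAddOns` through the builder above; the `use` paths assume the types are re-exported from the crate root, and `N` stands for a fully assembled node type satisfying the bounds in this file (wiring up such a node is out of scope here):

```rust
use reth_node_builder::{rpc::EthApiBuilder, FullNodeComponents};
use reth_scroll_node::{ScrollAddOns, ScrollNodeTypes};
use reth_scroll_rpc::eth::ScrollEthApiBuilder;
use scroll_alloy_network::Scroll;

/// Builds Scroll add-ons that forward locally submitted transactions to a
/// sequencer endpoint and keep the default 122kB payload size limit.
fn scroll_add_ons<N>() -> ScrollAddOns<N>
where
    N: FullNodeComponents<Types: ScrollNodeTypes>,
    ScrollEthApiBuilder: EthApiBuilder<N>,
{
    ScrollAddOns::<N>::builder::<Scroll>()
        .with_sequencer(Some("http://localhost:8545".to_string()))
        .with_payload_size_limit(122_880)
        .build()
}
```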
diff --git reth/crates/scroll/node/src/args.rs scroll-reth/crates/scroll/node/src/args.rs new file mode 100644 index 0000000000000000000000000000000000000000..7fe563804e2cca31ab5f6ee551183a348821bbe8 --- /dev/null +++ scroll-reth/crates/scroll/node/src/args.rs @@ -0,0 +1,30 @@ +use crate::builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT; + +use reth_scroll_rpc::eth::DEFAULT_MIN_SUGGESTED_PRIORITY_FEE; + +/// Rollup arguments for the Scroll node. +#[derive(Debug, Clone, clap::Args)] +pub struct ScrollRollupArgs { + /// Endpoint for the sequencer mempool (can be both HTTP and WS) + #[arg(long = "scroll.sequencer")] + pub sequencer: Option<String>, + + /// Minimum suggested priority fee (tip) in wei, default to + /// [`DEFAULT_MIN_SUGGESTED_PRIORITY_FEE`]. + #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = DEFAULT_MIN_SUGGESTED_PRIORITY_FEE)] + pub min_suggested_priority_fee: u64, + + /// Payload size limit, default to `122kB`. + #[arg(long = "scroll.payload-size-limit", default_value_t = SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT)] + pub payload_size_limit: u64, +} + +impl Default for ScrollRollupArgs { + fn default() -> Self { + Self { + sequencer: None, + min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, + payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, + } + } +}
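A sketch of how these flags could be parsed and inspected; `Cli` is a hypothetical wrapper, and the path assumes `ScrollRollupArgs` is re-exported from the crate root:

```rust
use clap::Parser;
use reth_scroll_node::ScrollRollupArgs;

#[derive(Debug, Parser)]
struct Cli {
    /// Scroll rollup flags (`--scroll.sequencer`,
    /// `--scroll.min-suggested-priority-fee`, `--scroll.payload-size-limit`).
    #[command(flatten)]
    rollup_args: ScrollRollupArgs,
}

fn main() {
    let cli = Cli::parse();
    println!(
        "sequencer={:?} tip={} payload-limit={}",
        cli.rollup_args.sequencer,
        cli.rollup_args.min_suggested_priority_fee,
        cli.rollup_args.payload_size_limit,
    );
}
```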
diff --git reth/crates/scroll/node/src/builder/consensus.rs scroll-reth/crates/scroll/node/src/builder/consensus.rs new file mode 100644 index 0000000000000000000000000000000000000000..05e6601827df59178f96c703f09b6e0302c0af2c --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/consensus.rs @@ -0,0 +1,29 @@ +use reth_chainspec::EthChainSpec; +use reth_node_builder::{components::ConsensusBuilder, BuilderContext, FullNodeTypes}; +use reth_node_types::NodeTypes; +use reth_primitives_traits::NodePrimitives; +use reth_scroll_consensus::ScrollBeaconConsensus; +use reth_scroll_primitives::ScrollReceipt; +use scroll_alloy_consensus::ScrollTransaction; +use scroll_alloy_hardforks::ScrollHardforks; +use std::sync::Arc; + +/// The consensus builder for Scroll. +#[derive(Debug, Default, Clone, Copy)] +pub struct ScrollConsensusBuilder; + +impl<Node> ConsensusBuilder<Node> for ScrollConsensusBuilder +where + Node: FullNodeTypes< + Types: NodeTypes< + ChainSpec: EthChainSpec + ScrollHardforks, + Primitives: NodePrimitives<Receipt = ScrollReceipt, SignedTx: ScrollTransaction>, + >, + >, +{ + type Consensus = Arc<ScrollBeaconConsensus<<Node::Types as NodeTypes>::ChainSpec>>; + + async fn build_consensus(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> { + Ok(Arc::new(ScrollBeaconConsensus::new(ctx.chain_spec()))) + } +}
diff --git reth/crates/scroll/node/src/builder/engine.rs scroll-reth/crates/scroll/node/src/builder/engine.rs new file mode 100644 index 0000000000000000000000000000000000000000..09a564b2afa7b4392662b1a306f070ce87686adc --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/engine.rs @@ -0,0 +1,159 @@ +use crate::addons::ScrollNodeTypes; +use std::sync::Arc; + +use alloy_consensus::BlockHeader; +use alloy_rpc_types_engine::{ExecutionData, PayloadError}; +use reth_node_api::{ + AddOnsContext, EngineApiMessageVersion, EngineApiValidator, EngineObjectValidationError, + ExecutionPayload, FullNodeComponents, InvalidPayloadAttributesError, MessageValidationKind, + NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, PayloadValidator, + VersionSpecificValidationError, +}; +use reth_node_builder::rpc::PayloadValidatorBuilder; +use reth_node_types::NodeTypes; +use reth_primitives_traits::{Block, RecoveredBlock}; +use reth_scroll_consensus::{CLIQUE_IN_TURN_DIFFICULTY, CLIQUE_NO_TURN_DIFFICULTY}; +use reth_scroll_engine_primitives::try_into_block; +use reth_scroll_primitives::ScrollBlock; +use scroll_alloy_hardforks::ScrollHardforks; +use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; + +/// Builder for [`ScrollEngineValidator`]. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct ScrollEngineValidatorBuilder; + +impl<Node> PayloadValidatorBuilder<Node> for ScrollEngineValidatorBuilder +where + Node: FullNodeComponents<Types: ScrollNodeTypes>, +{ + type Validator = ScrollEngineValidator<<Node::Types as NodeTypes>::ChainSpec>; + + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator> { + Ok(ScrollEngineValidator::new(ctx.config.chain.clone())) + } +} + +/// Scroll engine validator. +#[derive(Debug, Clone)] +pub struct ScrollEngineValidator<CS> { + chainspec: Arc<CS>, +} + +impl<CS> ScrollEngineValidator<CS> { + /// Returns a new [`ScrollEngineValidator`]. + pub const fn new(chainspec: Arc<CS>) -> Self { + Self { chainspec } + } +} + +impl<CS, Types> EngineApiValidator<Types> for ScrollEngineValidator<CS> +where + Types: PayloadTypes<PayloadAttributes = ScrollPayloadAttributes, ExecutionData = ExecutionData>, + CS: ScrollHardforks + Send + Sync + 'static, +{ + fn validate_version_specific_fields( + &self, + _version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, ScrollPayloadAttributes>, + ) -> Result<(), EngineObjectValidationError> { + validate_scroll_payload_or_attributes( + &payload_or_attrs, + payload_or_attrs.message_validation_kind(), + )?; + Ok(()) + } + + fn ensure_well_formed_attributes( + &self, + _version: EngineApiMessageVersion, + attributes: &ScrollPayloadAttributes, + ) -> Result<(), EngineObjectValidationError> { + validate_scroll_payload_or_attributes( + &PayloadOrAttributes::PayloadAttributes::<'_, ExecutionData, _>(attributes), + MessageValidationKind::PayloadAttributes, + )?; + + // ensure block data hint is present pre euclid. + let is_euclid_active = + self.chainspec.is_euclid_active_at_timestamp(attributes.payload_attributes.timestamp); + if !is_euclid_active && attributes.block_data_hint.is_empty() { + return Err(EngineObjectValidationError::InvalidParams( + "Missing block data hint Pre-Euclid".to_string().into(), + )); + } + + Ok(()) + } +} + +/// Validates the payload or attributes for Scroll. 
+fn validate_scroll_payload_or_attributes<Payload: ExecutionPayload>( + payload_or_attributes: &PayloadOrAttributes<'_, Payload, ScrollPayloadAttributes>, + message_validation_kind: MessageValidationKind, +) -> Result<(), EngineObjectValidationError> { + if payload_or_attributes.parent_beacon_block_root().is_some() { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::ParentBeaconBlockRootNotSupportedBeforeV3)); + } + if payload_or_attributes.withdrawals().is_some() { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)); + } + + Ok(()) +} + +impl<CS, Types> PayloadValidator<Types> for ScrollEngineValidator<CS> +where + Types: PayloadTypes<ExecutionData = ExecutionData>, + CS: ScrollHardforks + Send + Sync + 'static, +{ + type Block = ScrollBlock; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionData, + ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> { + let expected_hash = payload.payload.block_hash(); + + // First parse the block + let mut block = try_into_block(payload, self.chainspec.clone())?; + + // Seal the block with the no-turn difficulty and return if hashes match. + // We guess the difficulty, which should always be 1 or 2 on Scroll. + // CLIQUE_NO_TURN_DIFFICULTY is used starting at Euclid, so we test this value first. + block.header.difficulty = CLIQUE_NO_TURN_DIFFICULTY; + let block_hash_no_turn = block.hash_slow(); + if block_hash_no_turn == expected_hash { + return block + .seal_unchecked(block_hash_no_turn) + .try_recover() + .map_err(|err| NewPayloadError::Other(err.into())); + } + + // Seal the block with the in-turn difficulty and return if hashes match + block.header.difficulty = CLIQUE_IN_TURN_DIFFICULTY; + let block_hash_in_turn = block.hash_slow(); + if block_hash_in_turn == expected_hash { + return block + .seal_unchecked(block_hash_in_turn) + .try_recover() + .map_err(|err| NewPayloadError::Other(err.into())); + } + + Err(PayloadError::BlockHash { execution: block_hash_no_turn, consensus: expected_hash } + .into()) + } + + fn validate_payload_attributes_against_header( + &self, + attr: &Types::PayloadAttributes, + header: &<Self::Block as Block>::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() < header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } +}
diff --git reth/crates/scroll/node/src/builder/execution.rs scroll-reth/crates/scroll/node/src/builder/execution.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e56b699cab9ae2e1bd5e4169404b90555ec40ba --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/execution.rs @@ -0,0 +1,24 @@ +use reth_node_builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}; +use reth_node_types::NodeTypes; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_evm::ScrollEvmConfig; +use reth_scroll_primitives::ScrollPrimitives; + +/// Executor builder for Scroll. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct ScrollExecutorBuilder; + +impl<Node> ExecutorBuilder<Node> for ScrollExecutorBuilder +where + Node: FullNodeTypes, + Node::Types: NodeTypes<ChainSpec = ScrollChainSpec, Primitives = ScrollPrimitives>, +{ + type EVM = ScrollEvmConfig; + + async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> { + let evm_config = ScrollEvmConfig::scroll(ctx.chain_spec()); + + Ok(evm_config) + } +}
diff --git reth/crates/scroll/node/src/builder/mod.rs scroll-reth/crates/scroll/node/src/builder/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..86ed6d848601ad99def48042f8331f48e688b84d --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/mod.rs @@ -0,0 +1,6 @@ +pub(crate) mod consensus; +pub(crate) mod engine; +pub(crate) mod execution; +pub(crate) mod network; +pub(crate) mod payload; +pub(crate) mod pool;
diff --git reth/crates/scroll/node/src/builder/network.rs scroll-reth/crates/scroll/node/src/builder/network.rs new file mode 100644 index 0000000000000000000000000000000000000000..be1eee8ece93e87f5ef0025e8901cd0a21dc2f83 --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/network.rs @@ -0,0 +1,71 @@ +use reth_eth_wire_types::BasicNetworkPrimitives; +use reth_network::{ + config::NetworkMode, + protocol::{RlpxSubProtocol, RlpxSubProtocols}, + NetworkConfig, NetworkHandle, NetworkManager, PeersInfo, +}; +use reth_node_api::TxTy; +use reth_node_builder::{components::NetworkBuilder, BuilderContext, FullNodeTypes}; +use reth_node_types::NodeTypes; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_primitives::ScrollPrimitives; +use reth_tracing::tracing::info; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use std::fmt::Debug; +/// The network builder for Scroll. +#[derive(Debug, Default)] +pub struct ScrollNetworkBuilder { + /// Additional `RLPx` sub-protocols to be added to the network. + scroll_sub_protocols: RlpxSubProtocols, +} + +impl ScrollNetworkBuilder { + /// Create a new [`ScrollNetworkBuilder`] with default configuration. + pub fn new() -> Self { + Self { scroll_sub_protocols: RlpxSubProtocols::default() } + } + + /// Add a scroll sub-protocol to the network builder. + pub fn with_sub_protocol(mut self, protocol: RlpxSubProtocol) -> Self { + self.scroll_sub_protocols.push(protocol); + self + } +} + +impl<Node, Pool> NetworkBuilder<Node, Pool> for ScrollNetworkBuilder +where + Node: + FullNodeTypes<Types: NodeTypes<ChainSpec = ScrollChainSpec, Primitives = ScrollPrimitives>>, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TxTy<Node::Types>, + Pooled = scroll_alloy_consensus::ScrollPooledTransaction, + >, + > + Unpin + + 'static, +{ + type Network = NetworkHandle<ScrollNetworkPrimitives>; + + async fn build_network( + self, + ctx: &BuilderContext<Node>, + pool: Pool, + ) -> eyre::Result<Self::Network> { + // set the network mode to work. + let config = ctx.network_config()?; + let config = NetworkConfig { + network_mode: NetworkMode::Work, + extra_protocols: self.scroll_sub_protocols, + ..config + }; + + let network = NetworkManager::builder(config).await?; + let handle = ctx.start_network(network, pool, None); + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); + Ok(handle) + } +} + +/// Network primitive types used by Scroll networks. +pub type ScrollNetworkPrimitives = + BasicNetworkPrimitives<ScrollPrimitives, scroll_alloy_consensus::ScrollPooledTransaction>;
diff --git reth/crates/scroll/node/src/builder/payload.rs scroll-reth/crates/scroll/node/src/builder/payload.rs new file mode 100644 index 0000000000000000000000000000000000000000..25761255bac35c31ca7be344251b759aa3ec6928 --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/payload.rs @@ -0,0 +1,113 @@ +use reth_evm::ConfigureEvm; +use reth_node_api::PrimitivesTy; +use reth_node_builder::{ + components::PayloadBuilderBuilder, BuilderContext, FullNodeTypes, PayloadBuilderConfig, +}; +use reth_node_types::{NodeTypes, TxTy}; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_engine_primitives::ScrollEngineTypes; +use reth_scroll_evm::ScrollNextBlockEnvAttributes; +use reth_scroll_payload::{ScrollBuilderConfig, ScrollPayloadTransactions}; +use reth_scroll_primitives::{ScrollPrimitives, ScrollTransactionSigned}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use std::time::Duration; + +/// Payload builder for Scroll. +#[derive(Debug, Clone, Copy)] +pub struct ScrollPayloadBuilderBuilder<Txs = ()> { + /// Returns the current best transactions from the mempool. + pub best_transactions: Txs, + /// The payload building time limit. + pub payload_building_time_limit: Duration, + /// The block DA size limit. + pub block_da_size_limit: Option<u64>, +} + +impl Default for ScrollPayloadBuilderBuilder { + fn default() -> Self { + Self { + best_transactions: (), + payload_building_time_limit: SCROLL_PAYLOAD_BUILDING_DURATION, + block_da_size_limit: Some(SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT), + } + } +} + +const SCROLL_GAS_LIMIT: u64 = 20_000_000; +const SCROLL_PAYLOAD_BUILDING_DURATION: Duration = Duration::from_secs(1); +pub(crate) const SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; + +impl<Txs> ScrollPayloadBuilderBuilder<Txs> { + /// A helper method to initialize [`reth_scroll_payload::ScrollPayloadBuilder`] with the + /// given EVM config. + pub fn build<Node, Evm, Pool>( + self, + evm_config: Evm, + ctx: &BuilderContext<Node>, + pool: Pool, + ) -> eyre::Result<reth_scroll_payload::ScrollPayloadBuilder<Pool, Node::Provider, Evm, Txs>> + where + Node: FullNodeTypes< + Types: NodeTypes< + Payload = ScrollEngineTypes, + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + >, + >, + Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>> + + Unpin + + 'static, + Evm: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>>, + Txs: ScrollPayloadTransactions<Pool::Transaction>, + { + let gas_limit = ctx.payload_builder_config().gas_limit().unwrap_or_else (|| { + tracing::warn!(target: "reth::cli", "Using {SCROLL_GAS_LIMIT} gas limit for ScrollPayloadBuilder. 
Configure with --builder.gaslimit"); + SCROLL_GAS_LIMIT + }); + + let payload_builder = reth_scroll_payload::ScrollPayloadBuilder::new( + pool, + evm_config, + ctx.provider().clone(), + ScrollBuilderConfig::new( + Some(gas_limit), + self.payload_building_time_limit, + self.block_da_size_limit, + ), + ) + .with_transactions(self.best_transactions); + + Ok(payload_builder) + } +} + +impl<Node, Pool, Txs, Evm> PayloadBuilderBuilder<Node, Pool, Evm> + for ScrollPayloadBuilderBuilder<Txs> +where + Node: FullNodeTypes< + Types: NodeTypes< + Payload = ScrollEngineTypes, + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + >, + >, + Evm: ConfigureEvm< + Primitives = PrimitivesTy<Node::Types>, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + > + 'static, + Pool: TransactionPool<Transaction: PoolTransaction<Consensus = ScrollTransactionSigned>> + + Unpin + + 'static, + Txs: ScrollPayloadTransactions<Pool::Transaction>, +{ + type PayloadBuilder = reth_scroll_payload::ScrollPayloadBuilder<Pool, Node::Provider, Evm, Txs>; + + async fn build_payload_builder( + self, + ctx: &BuilderContext<Node>, + pool: Pool, + evm_config: Evm, + ) -> eyre::Result<Self::PayloadBuilder> { + self.build(evm_config, ctx, pool) + } +}
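A sketch of customizing the payload builder component above with a shorter build window and no DA size cap; the re-export path is an assumption and the values are illustrative only:

```rust
use reth_scroll_node::ScrollPayloadBuilderBuilder;
use std::time::Duration;

/// Returns a payload builder builder with tightened timing and no DA size cap.
fn payload_builder_builder() -> ScrollPayloadBuilderBuilder {
    ScrollPayloadBuilderBuilder {
        // shorten the per-payload building window from the 1s default
        payload_building_time_limit: Duration::from_millis(500),
        // lift the DA size cap (the default caps blocks at 122,880 bytes)
        block_da_size_limit: None,
        ..Default::default()
    }
}

fn main() {
    let _builder = payload_builder_builder();
}
```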
diff --git reth/crates/scroll/node/src/builder/pool.rs scroll-reth/crates/scroll/node/src/builder/pool.rs new file mode 100644 index 0000000000000000000000000000000000000000..27079e78d1445b2edb7cd6301d556cf28e6de5c7 --- /dev/null +++ scroll-reth/crates/scroll/node/src/builder/pool.rs @@ -0,0 +1,382 @@ +use reth_chainspec::EthChainSpec; +use reth_node_api::{FullNodeTypes, NodeTypes}; +use reth_node_builder::{ + components::{PoolBuilder, PoolBuilderConfigOverrides}, + BuilderContext, TxTy, +}; + +use reth_provider::CanonStateSubscriptions; +use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; +use reth_scroll_evm::ScrollBaseFeeProvider; +use reth_scroll_txpool::{ScrollTransactionPool, ScrollTransactionValidator}; +use reth_transaction_pool::{ + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, EthPoolTransaction, + TransactionValidationTaskExecutor, +}; +use scroll_alloy_consensus::ScrollTransaction; +use scroll_alloy_hardforks::ScrollHardforks; + +/// A basic scroll transaction pool. +/// +/// This contains various settings that can be configured and take precedence over the node's +/// config. +#[derive(Debug, Clone)] +pub struct ScrollPoolBuilder<T = reth_scroll_txpool::ScrollPooledTransaction> { + /// Enforced overrides that are applied to the pool config. + pub pool_config_overrides: PoolBuilderConfigOverrides, + + /// Marker for the pooled transaction type. + _pd: core::marker::PhantomData<T>, +} + +impl<T> Default for ScrollPoolBuilder<T> { + fn default() -> Self { + Self { pool_config_overrides: Default::default(), _pd: Default::default() } + } +} + +impl<T> ScrollPoolBuilder<T> { + /// Sets the [`PoolBuilderConfigOverrides`] on the pool builder. + pub fn with_pool_config_overrides( + mut self, + pool_config_overrides: PoolBuilderConfigOverrides, + ) -> Self { + self.pool_config_overrides = pool_config_overrides; + self + } +} + +impl<Node, T> PoolBuilder<Node> for ScrollPoolBuilder<T> +where + Node: FullNodeTypes< + Types: NodeTypes< + ChainSpec: EthChainSpec + ScrollHardforks + ChainConfig<Config = ScrollChainConfig>, + >, + >, + T: EthPoolTransaction<Consensus = TxTy<Node::Types>> + ScrollTransaction, +{ + type Pool = ScrollTransactionPool<Node::Provider, DiskFileBlobStore, T>; + + async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> { + let Self { pool_config_overrides, .. } = self; + let data_dir = ctx.config().datadir(); + let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; + + let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) + .no_eip4844() + .with_head_timestamp(ctx.head().timestamp) + .kzg_settings(ctx.kzg_settings()?) 
+ .with_local_transactions_config( + pool_config_overrides.clone().apply(ctx.pool_config()).local_transactions_config, + ) + .with_max_tx_input_bytes(ctx.chain_spec().chain_config().max_tx_payload_bytes_per_block) + .with_additional_tasks( + pool_config_overrides + .additional_validation_tasks + .unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks), + ) + .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()) + .map(|validator| { + ScrollTransactionValidator::new(validator) + // In --dev mode we can't require gas fees because we're unable to decode + // the L1 block info + .require_l1_data_gas_fee(!ctx.config().dev.dev) + }); + + let transaction_pool = reth_transaction_pool::Pool::new( + validator, + CoinbaseTipOrdering::default(), + blob_store, + pool_config_overrides.apply(ctx.pool_config()), + ); + tracing::info!(target: "reth::cli", "Transaction pool initialized"); + let transactions_path = data_dir.txpool_transactions(); + + // spawn txpool maintenance tasks + { + let chain_events = ctx.provider().canonical_state_stream(); + let client = ctx.provider().clone(); + let transactions_backup_config = + reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path); + let base_fee_provider = ScrollBaseFeeProvider::new(ctx.chain_spec()); + + ctx.task_executor().spawn_critical_with_graceful_shutdown_signal( + "local transactions backup task", + |shutdown| { + reth_transaction_pool::maintain::backup_local_transactions_task( + shutdown, + transaction_pool.clone(), + transactions_backup_config, + ) + }, + ); + + // spawn the main maintenance task + ctx.task_executor().spawn_critical( + "txpool maintenance task", + reth_transaction_pool::maintain::maintain_transaction_pool_future( + client, + base_fee_provider, + transaction_pool.clone(), + chain_events, + ctx.task_executor().clone(), + reth_transaction_pool::maintain::MaintainPoolConfig { + max_tx_lifetime: transaction_pool.config().max_queued_lifetime, + ..Default::default() + }, + ), + ); + tracing::debug!(target: "reth::cli", "Spawned txpool maintenance task"); + } + + Ok(transaction_pool) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ScrollNode; + + use alloy_consensus::{transaction::Recovered, Header, Signed, TxLegacy}; + use alloy_primitives::{private::rand::random_iter, Bytes, Sealed, Signature, B256, U256}; + use reth_chainspec::Head; + use reth_db::mock::DatabaseMock; + use reth_node_api::FullNodeTypesAdapter; + use reth_node_builder::common::WithConfigs; + use reth_node_core::node_config::NodeConfig; + use reth_primitives_traits::{ + transaction::error::InvalidTransactionError, GotExpected, GotExpectedBoxed, + }; + use reth_provider::{ + noop::NoopProvider, + test_utils::{ExtendedAccount, MockEthProvider}, + }; + use reth_scroll_chainspec::{ScrollChainSpec, SCROLL_DEV, SCROLL_MAINNET}; + use reth_scroll_primitives::{ScrollBlock, ScrollPrimitives}; + use reth_scroll_txpool::ScrollPooledTransaction; + use reth_tasks::TaskManager; + use reth_transaction_pool::{ + blobstore::NoopBlobStore, + error::{InvalidPoolTransactionError, PoolErrorKind}, + PoolConfig, TransactionOrigin, TransactionPool, + }; + use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message}; + use scroll_alloy_evm::gas_price_oracle::L1_GAS_PRICE_ORACLE_ADDRESS; + + async fn pool() -> ( + ScrollTransactionPool<NoopProvider<ScrollChainSpec, ScrollPrimitives>, DiskFileBlobStore>, + TaskManager, + ) { + let handle = tokio::runtime::Handle::current(); + let manager = TaskManager::new(handle); 
+ let config = WithConfigs { + config: NodeConfig::new(SCROLL_MAINNET.clone()), + toml_config: Default::default(), + }; + + let pool_builder = ScrollPoolBuilder::<ScrollPooledTransaction>::default(); + let ctx = BuilderContext::< + FullNodeTypesAdapter< + ScrollNode, + DatabaseMock, + NoopProvider<ScrollChainSpec, ScrollPrimitives>, + >, + >::new( + Head::default(), + NoopProvider::new(SCROLL_MAINNET.clone()), + manager.executor(), + config, + ); + (pool_builder.build_pool(&ctx).await.unwrap(), manager) + } + + #[tokio::test] + async fn test_validate_one_oversized_transaction() { + // create the pool. + let (pool, manager) = pool().await; + let tx = ScrollTxEnvelope::Legacy(Signed::new_unchecked( + TxLegacy { gas_limit: 21_000, ..Default::default() }, + Signature::new(U256::ZERO, U256::ZERO, false), + Default::default(), + )); + + // Create a pool transaction with an encoded length of 123,904 bytes. + let pool_tx = ScrollPooledTransaction::new( + Recovered::new_unchecked(tx, Default::default()), + 121 * 1024, + ); + + // add the transaction to the pool and expect an `OversizedData` error. + let err = pool.add_transaction(TransactionOrigin::Local, pool_tx).await.unwrap_err(); + assert!(matches!( + err.kind, + PoolErrorKind::InvalidTransaction( + InvalidPoolTransactionError::OversizedData(x, y,) + ) if x == 121*1024 && y == 120*1024, + )); + + // explicitly drop the manager here otherwise the `TransactionValidationTaskExecutor` will + // drop all validation tasks. + drop(manager); + } + + #[tokio::test] + async fn test_validate_one_rollup_fee_exceeds_limit() { + // create the client. + let handle = tokio::runtime::Handle::current(); + let manager = TaskManager::new(handle); + let blob_store = NoopBlobStore::default(); + let signer = Default::default(); + let client = + MockEthProvider::<ScrollPrimitives, _>::new().with_chain_spec(SCROLL_DEV.clone()); + let hash = B256::random(); + + // load a header, block, signer and the L1_GAS_PRICE_ORACLE_ADDRESS storage. + client.add_header(hash, Header::default()); + client.add_block(hash, ScrollBlock::default()); + client.add_account(signer, ExtendedAccount::new(0, U256::from(400_000))); + client.add_account( + L1_GAS_PRICE_ORACLE_ADDRESS, + ExtendedAccount::new(0, U256::from(400_000)).extend_storage( + (0u8..8).map(|k| (B256::from(U256::from(k)), U256::from(u64::MAX))), + ), + ); + + // create the validation task. + let validator = TransactionValidationTaskExecutor::eth_builder(client) + .no_eip4844() + .build_with_tasks(manager.executor(), blob_store) + .map(|validator| { + ScrollTransactionValidator::new(validator).require_l1_data_gas_fee(true) + }); + + // create the pool. + let pool = ScrollTransactionPool::new( + validator, + CoinbaseTipOrdering::<ScrollPooledTransaction>::default(), + NoopBlobStore::default(), + PoolConfig::default(), + ); + + // prepare a transaction with random input. + let tx = ScrollTxEnvelope::Legacy(Signed::new_unchecked( + TxLegacy { + gas_limit: 55_000, + gas_price: 7, + input: Bytes::from(random_iter::<u8>().take(100).collect::<Vec<_>>()), + ..Default::default() + }, + Signature::new(U256::ZERO, U256::ZERO, false), + Default::default(), + )); + let pool_tx = + ScrollPooledTransaction::new(Recovered::new_unchecked(tx, signer), 120 * 1024); + + // add the transaction in the pool and expect to hit `InsufficientFunds` error. 
+ let err = pool.add_transaction(TransactionOrigin::Local, pool_tx).await.unwrap_err(); + assert!(matches!( + err.kind, + PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Consensus( + InvalidTransactionError::GasUintOverflow + )) + )); + + // explicitly drop the manager here otherwise the `TransactionValidationTaskExecutor` will + // drop all validation tasks. + drop(manager); + } + + #[tokio::test] + async fn test_validate_one_rollup_fee_exceeds_balance() { + // create the client. + let handle = tokio::runtime::Handle::current(); + let manager = TaskManager::new(handle); + let blob_store = NoopBlobStore::default(); + let signer = Default::default(); + let client = + MockEthProvider::<ScrollPrimitives, _>::new().with_chain_spec(SCROLL_DEV.clone()); + let hash = B256::random(); + + // load a header, block, signer and the L1_GAS_PRICE_ORACLE_ADDRESS storage. + client.add_header(hash, Header::default()); + client.add_block(hash, ScrollBlock::default()); + client.add_account(signer, ExtendedAccount::new(0, U256::from(400_000))); + client.add_account( + L1_GAS_PRICE_ORACLE_ADDRESS, + ExtendedAccount::new(0, U256::from(400_000)).extend_storage( + (0u8..8).map(|k| (B256::from(U256::from(k)), U256::from(u32::MAX))), + ), + ); + + // create the validation task. + let validator = TransactionValidationTaskExecutor::eth_builder(client) + .no_eip4844() + .build_with_tasks(manager.executor(), blob_store) + .map(|validator| { + ScrollTransactionValidator::new(validator).require_l1_data_gas_fee(true) + }); + + // create the pool. + let pool = ScrollTransactionPool::new( + validator, + CoinbaseTipOrdering::<ScrollPooledTransaction>::default(), + NoopBlobStore::default(), + PoolConfig::default(), + ); + + // prepare a transaction with random input. + let tx = ScrollTxEnvelope::Legacy(Signed::new_unchecked( + TxLegacy { + gas_limit: 55_000, + gas_price: 7, + input: Bytes::from(random_iter::<u8>().take(100).collect::<Vec<_>>()), + ..Default::default() + }, + Signature::new(U256::ZERO, U256::ZERO, false), + Default::default(), + )); + let pool_tx = + ScrollPooledTransaction::new(Recovered::new_unchecked(tx, signer), 120 * 1024); + + // add the transaction in the pool and expect to hit `InsufficientFunds` error. + let err = pool.add_transaction(TransactionOrigin::Local, pool_tx).await.unwrap_err(); + assert!(matches!( + err.kind, + PoolErrorKind::InvalidTransaction( + InvalidPoolTransactionError::Consensus(InvalidTransactionError::InsufficientFunds(GotExpectedBoxed(expected))) + ) if *expected == GotExpected{ got: U256::from(400000), expected: U256::from(483673629772436u64) } + )); + + // explicitly drop the manager here otherwise the `TransactionValidationTaskExecutor` will + // drop all validation tasks. + drop(manager); + } + + #[tokio::test] + async fn test_validate_one_disallow_l1_messages() { + // create the pool. + let (pool, manager) = pool().await; + let tx = ScrollTxEnvelope::L1Message(Sealed::new_unchecked( + TxL1Message::default(), + B256::default(), + )); + + // Create a pool transaction with the L1 message. + let pool_tx = + ScrollPooledTransaction::new(Recovered::new_unchecked(tx, Default::default()), 0); + + // add the transaction to the pool and expect an `OversizedData` error. 
+ let err = pool.add_transaction(TransactionOrigin::Local, pool_tx).await.unwrap_err(); + assert!(matches!( + err.kind, + PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported + )) + )); + + // explicitly drop the manager here otherwise the `TransactionValidationTaskExecutor` will + // drop all validation tasks. + drop(manager); + } +}
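The pool builder and its tests above exercise two Scroll-specific checks: a transaction whose encoded size exceeds the chain's max_tx_payload_bytes_per_block (120 KiB in these tests) is rejected as OversizedData, and when require_l1_data_gas_fee is enabled the sender's balance must cover the rollup (L1 data) fee on top of execution gas and transferred value. The following is a minimal, standalone sketch of that accounting only; the names are illustrative, the L1 data fee is a placeholder value rather than the oracle-derived formula, and the real logic lives in ScrollTransactionValidator.

    // Illustrative sketch; not the reth-scroll-txpool API.
    const MAX_TX_PAYLOAD_BYTES_PER_BLOCK: usize = 120 * 1024;

    /// Mirrors the `OversizedData(121 * 1024, 120 * 1024)` case asserted in the tests above.
    fn is_oversized(encoded_len: usize) -> bool {
        encoded_len > MAX_TX_PAYLOAD_BYTES_PER_BLOCK
    }

    /// Balance required when the L1 data gas fee is enforced:
    /// execution gas cost plus transferred value plus the rollup data fee.
    fn required_balance(gas_limit: u128, gas_price: u128, value: u128, l1_data_fee: u128) -> u128 {
        gas_limit * gas_price + value + l1_data_fee
    }

    fn main() {
        // A 121 KiB transaction is rejected against the 120 KiB per-block payload limit.
        assert!(is_oversized(121 * 1024));

        // With a 400_000 wei balance, the execution cost alone (55_000 * 7) fits,
        // but adding a (hypothetical) rollup data fee pushes the total past the balance.
        let balance: u128 = 400_000;
        assert!(required_balance(55_000, 7, 0, 0) <= balance);
        assert!(required_balance(55_000, 7, 0, 1_000_000) > balance);
    }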
diff --git reth/crates/scroll/node/src/lib.rs scroll-reth/crates/scroll/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b28f39d4140a913bad1eb6c1707777e629cbf302 --- /dev/null +++ scroll-reth/crates/scroll/node/src/lib.rs @@ -0,0 +1,29 @@ +//! Node specific implementations for Scroll. + +mod args; +pub use args::ScrollRollupArgs; + +mod builder; +pub use builder::{ + consensus::ScrollConsensusBuilder, + engine::{ScrollEngineValidator, ScrollEngineValidatorBuilder}, + execution::ScrollExecutorBuilder, + network::{ScrollNetworkBuilder, ScrollNetworkPrimitives}, + payload::ScrollPayloadBuilderBuilder, + pool::ScrollPoolBuilder, +}; + +mod addons; +pub use addons::{ScrollAddOns, ScrollAddOnsBuilder, ScrollNodeTypes}; + +mod node; +pub use node::ScrollNode; + +mod storage; +pub use storage::ScrollStorage; + +/// Helpers for running test node instances. +#[cfg(feature = "test-utils")] +pub mod test_utils; + +pub use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes};
diff --git reth/crates/scroll/node/src/node.rs scroll-reth/crates/scroll/node/src/node.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2781e15bf10b4d97ba6ed4e4de9b615f43b863e --- /dev/null +++ scroll-reth/crates/scroll/node/src/node.rs @@ -0,0 +1,117 @@ +//! Node specific implementations for Scroll. + +use crate::{ + args::ScrollRollupArgs, ScrollAddOns, ScrollAddOnsBuilder, ScrollConsensusBuilder, + ScrollExecutorBuilder, ScrollNetworkBuilder, ScrollPayloadBuilderBuilder, ScrollPoolBuilder, + ScrollStorage, +}; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_node_api::{FullNodeComponents, PayloadAttributesBuilder, PayloadTypes}; +use reth_node_builder::{ + components::{BasicPayloadServiceBuilder, ComponentsBuilder}, + node::{FullNodeTypes, NodeTypes}, + DebugNode, Node, NodeAdapter, NodeComponentsBuilder, +}; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_engine_primitives::ScrollEngineTypes; +use reth_scroll_primitives::ScrollPrimitives; +use scroll_alloy_network::Scroll; +use std::sync::Arc; + +/// The Scroll node implementation. +#[derive(Clone, Debug, Default)] +#[non_exhaustive] +pub struct ScrollNode { + /// Additional Scroll args. + pub args: ScrollRollupArgs, +} + +impl ScrollNode { + /// Creates a new instance of the Scroll node type. + pub const fn new(args: ScrollRollupArgs) -> Self { + Self { args } + } + + /// Returns a [`ComponentsBuilder`] configured for a regular Ethereum node. + pub fn components<Node>() -> ComponentsBuilder< + Node, + ScrollPoolBuilder, + BasicPayloadServiceBuilder<ScrollPayloadBuilderBuilder>, + ScrollNetworkBuilder, + ScrollExecutorBuilder, + ScrollConsensusBuilder, + > + where + Node: FullNodeTypes< + Types: NodeTypes< + ChainSpec = ScrollChainSpec, + Primitives = ScrollPrimitives, + Payload = ScrollEngineTypes, + >, + >, + { + ComponentsBuilder::default() + .node_types::<Node>() + .pool(ScrollPoolBuilder::default()) + .executor(ScrollExecutorBuilder::default()) + .payload(BasicPayloadServiceBuilder::new(ScrollPayloadBuilderBuilder::default())) + .network(ScrollNetworkBuilder::new()) + .executor(ScrollExecutorBuilder) + .consensus(ScrollConsensusBuilder) + } +} + +impl<N> Node<N> for ScrollNode +where + N: FullNodeTypes<Types = Self>, +{ + type ComponentsBuilder = ComponentsBuilder< + N, + ScrollPoolBuilder, + BasicPayloadServiceBuilder<ScrollPayloadBuilderBuilder>, + ScrollNetworkBuilder, + ScrollExecutorBuilder, + ScrollConsensusBuilder, + >; + + type AddOns = ScrollAddOns< + NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>, + >; + + fn components_builder(&self) -> Self::ComponentsBuilder { + Self::components() + } + + fn add_ons(&self) -> Self::AddOns { + ScrollAddOnsBuilder::<Scroll, _>::default() + .with_sequencer(self.args.sequencer.clone()) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_payload_size_limit(self.args.payload_size_limit) + .build() + } +} + +impl<N> DebugNode<N> for ScrollNode +where + N: FullNodeComponents<Types = Self>, +{ + type RpcBlock = alloy_rpc_types_eth::Block<scroll_alloy_consensus::ScrollTxEnvelope>; + + fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy<Self> { + rpc_block.into_consensus() + } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<<<Self as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes> + { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } +} + +impl NodeTypes for 
ScrollNode { + type Primitives = ScrollPrimitives; + type ChainSpec = ScrollChainSpec; + type Storage = ScrollStorage; + type Payload = ScrollEngineTypes; +}
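ScrollNode bundles the component builders and add-ons above, so it can be handed to reth's generic node builder like any other node implementation. Below is a minimal launch sketch, assuming reth's NodeBuilder / testing_node / launch flow and a default ScrollRollupArgs; it is not taken from the fork itself, and method names may differ between reth versions.

    use reth_node_builder::NodeBuilder;
    use reth_node_core::node_config::NodeConfig;
    use reth_scroll_chainspec::SCROLL_MAINNET;
    use reth_scroll_node::{ScrollNode, ScrollRollupArgs};
    use reth_tasks::TaskManager;

    #[tokio::main]
    async fn main() -> eyre::Result<()> {
        // Assumed wiring: NodeConfig::new(chain spec) as in the pool tests above,
        // a TaskManager for the executor handle, and reth's in-memory test database.
        let tasks = TaskManager::current();
        let config = NodeConfig::new(SCROLL_MAINNET.clone());

        let handle = NodeBuilder::new(config)
            .testing_node(tasks.executor())
            // components_builder() and add_ons() from the `Node` impl above are applied here.
            .node(ScrollNode::new(ScrollRollupArgs::default()))
            .launch()
            .await?;

        handle.node_exit_future.await
    }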
diff --git reth/crates/scroll/node/src/storage.rs scroll-reth/crates/scroll/node/src/storage.rs new file mode 100644 index 0000000000000000000000000000000000000000..4347044a74eb5a3533092a1252ad830da37917da --- /dev/null +++ scroll-reth/crates/scroll/node/src/storage.rs @@ -0,0 +1,5 @@ +use reth_provider::EthStorage; +use reth_scroll_primitives::ScrollTransactionSigned; + +/// The storage implementation for Scroll. +pub type ScrollStorage = EthStorage<ScrollTransactionSigned>;
diff --git reth/crates/scroll/node/src/test_utils.rs scroll-reth/crates/scroll/node/src/test_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..2530441b9bec9fd870e2f12c10bdcf1bb6d1d719 --- /dev/null +++ scroll-reth/crates/scroll/node/src/test_utils.rs @@ -0,0 +1,87 @@ +use crate::{ScrollBuiltPayload, ScrollNode as OtherScrollNode, ScrollPayloadBuilderAttributes}; +use alloy_genesis::Genesis; +use alloy_primitives::{Address, B256}; +use alloy_rpc_types_engine::PayloadAttributes; +use reth_e2e_test_utils::{ + transaction::TransactionTestContext, wallet::Wallet, NodeHelperType, TmpDB, +}; +use reth_node_api::NodeTypesWithDBAdapter; + +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_provider::providers::BlockchainProvider; +use reth_scroll_chainspec::{ScrollChainConfig, ScrollChainSpecBuilder}; +use reth_tasks::TaskManager; +use scroll_alloy_rpc_types_engine::BlockDataHint; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Scroll Node Helper type +pub(crate) type ScrollNode = NodeHelperType< + OtherScrollNode, + BlockchainProvider<NodeTypesWithDBAdapter<OtherScrollNode, TmpDB>>, +>; + +/// Creates the initial setup with `num_nodes` of the node config, started and connected. +pub async fn setup( + num_nodes: usize, + is_dev: bool, +) -> eyre::Result<(Vec<ScrollNode>, TaskManager, Wallet)> { + let genesis: Genesis = + serde_json::from_str(include_str!("../tests/assets/genesis.json")).unwrap(); + reth_e2e_test_utils::setup_engine( + num_nodes, + Arc::new( + ScrollChainSpecBuilder::scroll_mainnet().genesis(genesis).galileo_v2_activated().build( + ScrollChainConfig { + max_tx_payload_bytes_per_block: 120 * 1024, + ..Default::default() + }, + ), + ), + is_dev, + Default::default(), + scroll_payload_attributes, + ) + .await +} + +/// Advance the chain with sequential payloads returning them in the end. +pub async fn advance_chain( + length: usize, + node: &mut ScrollNode, + wallet: Arc<Mutex<Wallet>>, +) -> eyre::Result<Vec<ScrollBuiltPayload>> { + node.advance(length as u64, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::transfer_tx_nonce_bytes( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) + .await +} + +/// Helper function to create a new scroll payload attributes +pub fn scroll_payload_attributes(timestamp: u64) -> ScrollPayloadBuilderAttributes { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: None, + parent_beacon_block_root: Some(B256::ZERO), + }; + + ScrollPayloadBuilderAttributes { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], + no_tx_pool: false, + block_data_hint: BlockDataHint::none(), + gas_limit: None, + } +}
diff --git reth/crates/scroll/node/tests/assets/genesis.json scroll-reth/crates/scroll/node/tests/assets/genesis.json new file mode 100644 index 0000000000000000000000000000000000000000..9730581b3efa981e44e62d6d31497d2e0bc53ef9 --- /dev/null +++ scroll-reth/crates/scroll/node/tests/assets/genesis.json @@ -0,0 +1,108 @@ +{ + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x00", + "gasLimit": "0x1312d00", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5300000000000000000000000000000000000002": { + "balance": "0xd3c21bcecceda1000000", + "storage": { + "0x01": "0x000000000000000000000000000000000000000000000000000000003758e6b0", + "0x02": "0x0000000000000000000000000000000000000000000000000000000000000038", + "0x03": "0x000000000000000000000000000000000000000000000000000000003e95ba80", + "0x04": "0x0000000000000000000000005300000000000000000000000000000000000003", + "0x05": "0x000000000000000000000000000000000000000000000000000000008390c2c1", + "0x06": 
"0x00000000000000000000000000000000000000000000000000000069cf265bfe", + "0x07": "0x00000000000000000000000000000000000000000000000000000000168b9aa3" + } + } + }, + "number": "0x0" +}
diff --git reth/crates/scroll/node/tests/e2e/main.rs scroll-reth/crates/scroll/node/tests/e2e/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..1402b98661a6946e2c62d2d11a158468546f98e5 --- /dev/null +++ scroll-reth/crates/scroll/node/tests/e2e/main.rs @@ -0,0 +1,3 @@ +#![allow(missing_docs)] + +mod payload;
diff --git reth/crates/scroll/node/tests/e2e/payload.rs scroll-reth/crates/scroll/node/tests/e2e/payload.rs new file mode 100644 index 0000000000000000000000000000000000000000..81e2bf4b45ad99460863aaf84ccfb2abd4ebcc68 --- /dev/null +++ scroll-reth/crates/scroll/node/tests/e2e/payload.rs @@ -0,0 +1,19 @@ +use reth_scroll_node::test_utils::{advance_chain, setup}; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[tokio::test] +async fn can_sync() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let (mut node, _tasks, wallet) = setup(1, false).await?; + let mut node = node.pop().unwrap(); + let wallet = Arc::new(Mutex::new(wallet)); + + let tip: usize = 90; + + // Create a chain of 90 blocks + let _canonical_payload_chain = advance_chain(tip, &mut node, wallet.clone()).await?; + + Ok(()) +}
diff --git reth/crates/scroll/openvm-compat/Cargo.lock scroll-reth/crates/scroll/openvm-compat/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..ed661f8cf46c85091a013e3b6d79eaff3f906378 --- /dev/null +++ scroll-reth/crates/scroll/openvm-compat/Cargo.lock @@ -0,0 +1,4073 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy-chains" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfaa9ea039a6f9304b4a593d780b1f23e1ae183acdee938b11b38795acacc9f1" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "num_enum", + "serde", + "strum", +] + +[[package]] +name = "alloy-consensus" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad704069c12f68d0c742d0cad7e0a03882b42767350584627fbf8a47b1bf1846" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-trie", + "alloy-tx-macros", + "auto_impl", + "borsh", + "c-kzg", + "derive_more", + "either", + "k256", + "once_cell", + "rand 0.8.5", + "secp256k1", + "serde", + "serde_json", + "serde_with", + "thiserror", +] + +[[package]] +name = "alloy-consensus-any" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc374f640a5062224d7708402728e3d6879a514ba10f377da62e7dfb14c673e6" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-eip2124" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "741bdd7499908b3aa0b159bba11e71c8cddd009a2c2eb7a06e825f1ec87900a5" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "serde", + "thiserror", +] + +[[package]] +name = "alloy-eip2930" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "borsh", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "borsh", + "k256", + "serde", + "serde_with", + "thiserror", +] + +[[package]] +name = "alloy-eips" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e867b5fd52ed0372a95016f3a37cbff95a9d5409230fbaef2d8ea00e8618098" +dependencies = [ + "alloy-eip2124", + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "borsh", + "c-kzg", + "derive_more", + "either", + "serde", + "serde_with", + "sha2", + "thiserror", +] + +[[package]] +name = "alloy-evm" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08e9e656d58027542447c1ca5aa4ca96293f09e6920c4651953b7451a7c35e4e" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-hardforks", + "alloy-primitives", + "alloy-sol-types", + "auto_impl", + "derive_more", + "revm", + "thiserror", +] + +[[package]] +name = "alloy-genesis" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b90be17e9760a6ba6d13cebdb049cea405ebc8bf57d90664ed708cc5bc348342" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "alloy-trie", + "borsh", + "serde", + "serde_with", +] + +[[package]] +name = "alloy-hardforks" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e29d7eacf42f89c21d7f089916d0bdb4f36139a31698790e8837d2dbbd4b2c3" +dependencies = [ + "alloy-chains", + "alloy-eip2124", + "alloy-primitives", + "auto_impl", + "dyn-clone", +] + +[[package]] +name = "alloy-network-primitives" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d3ae2777e900a7a47ad9e3b8ab58eff3d93628265e73bbdee09acf90bf68f75" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-primitives" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "foldhash", + "hashbrown 0.16.0", + "indexmap 2.12.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.9.2", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9981491bb98e76099983f516ec7de550db0597031f5828c994961eb4bb993cce" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "derive_more", + "strum", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29031a6bf46177d65efce661f7ab37829ca09dd341bc40afb5194e97600655cc" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "serde_with", + "thiserror", +] + +[[package]] +name = "alloy-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e856112bfa0d9adc85bd7c13db03fad0e71d1d6fb4c2010e475b6718108236" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-sol-macro" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" +dependencies 
= [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.12.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.110", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" +dependencies = [ + "const-hex", + "dunce", + "heck", + "macro-string", + "proc-macro2", + "quote", + "syn 2.0.110", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", +] + +[[package]] +name = "alloy-trie" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3412d52bb97c6c6cc27ccc28d4e6e8cf605469101193b50b0bd5813b1f990b5" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more", + "nybbles", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "alloy-tx-macros" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ccf423f6de62e8ce1d6c7a11fb7508ae3536d02e0d68aaeb05c8669337d0937" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ark-bls12-381" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df4dcc01ff89867cd86b0da835f23c3f02738353aaee7dde7495af71363b8d5" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.110", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec", + "digest 
0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "aurora-engine-modexp" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "518bc5745a6264b5fd7b09dffb9667e400ee9e2bbe18555fac75e1fe9afa0df9" +dependencies = [ + "hex", + "num", +] + +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin-io" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", +] + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +dependencies = [ + "serde_core", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "serde", + "tap", + "wyz", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blst" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +dependencies = [ + "serde", +] + +[[package]] +name = "c-kzg" +version = "2.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + +[[package]] +name = "cc" +version = "1.2.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + +[[package]] +name = "const-hex" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" +dependencies = [ + "cfg-if", + "cpufeatures", + "proptest", + "serde_core", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "convert_case" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = 
"0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "serde", + "strsim", + "syn 2.0.110", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive-where" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn 2.0.110", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +dependencies = [ + "serde", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "enumn" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "generic-array" +version = "0.14.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +dependencies = [ + "foldhash", + "serde", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = 
"2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown 0.16.0", + "serde", + "serde_core", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "serdect", + "sha2", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "modular-bitfield" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a53d79ba8304ac1c4f9eb3b9d281f21f7be9d4626f72ce7df4ad8fbde4f38a74" +dependencies = [ + "modular-bitfield-impl", + "static_assertions", +] + +[[package]] +name = "modular-bitfield-impl" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "num-complex" +version = "0.4.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "nums" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3c74f925fb8cfc49a8022f2afce48a0683b70f9e439885594e84c5edbf5b01" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "nybbles" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" +dependencies = [ + "alloy-rlp", + "cfg-if", + "proptest", + "ruint", + "serde", + "smallvec", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] + +[[package]] +name = "op-alloy-consensus" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a501241474c3118833d6195312ae7eb7cc90bbb0d5f524cbb0b06619e49ff67" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more", + "serde", + "serde_with", + "thiserror", +] + +[[package]] +name = "openvm" +version = "1.4.0" +source = "git+https://github.com/openvm-org/openvm.git?tag=v1.4.0#39ee587f0f73646e3753cb2aa5f34885d4efffe0" 
+dependencies = [ + "bytemuck", + "num-bigint", + "openvm-custom-insn", + "openvm-platform", + "openvm-rv32im-guest", + "serde", +] + +[[package]] +name = "openvm-compat" +version = "0.0.1" +dependencies = [ + "openvm", + "reth-chainspec", + "reth-ethereum-forks", + "reth-evm", + "reth-evm-ethereum", + "reth-execution-types", + "reth-primitives", + "reth-primitives-traits", + "reth-scroll-chainspec", + "reth-scroll-evm", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-errors", + "reth-trie", + "reth-trie-sparse", + "scroll-alloy-consensus", + "scroll-alloy-rpc-types", +] + +[[package]] +name = "openvm-custom-insn" +version = "0.1.0" +source = "git+https://github.com/openvm-org/openvm.git?tag=v1.4.0#39ee587f0f73646e3753cb2aa5f34885d4efffe0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "openvm-platform" +version = "1.4.0" +source = "git+https://github.com/openvm-org/openvm.git?tag=v1.4.0#39ee587f0f73646e3753cb2aa5f34885d4efffe0" +dependencies = [ + "libm", + "openvm-custom-insn", + "openvm-rv32im-guest", +] + +[[package]] +name = "openvm-rv32im-guest" +version = "1.4.0" +source = "git+https://github.com/openvm-org/openvm.git?tag=v1.4.0#39ee587f0f73646e3753cb2aa5f34885d4efffe0" +dependencies = [ + "openvm-custom-insn", + "p3-field", + "strum_macros 0.26.4", +] + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p3-field" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=539bbc84085efb609f4f62cb03cf49588388abdb#539bbc84085efb609f4f62cb03cf49588388abdb" +dependencies = [ + "itertools 0.14.0", + "num-bigint", + "num-integer", + "num-traits", + "nums", + "p3-maybe-rayon", + "p3-util", + "rand 0.8.5", + "serde", + "tracing", +] + +[[package]] +name = "p3-maybe-rayon" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=539bbc84085efb609f4f62cb03cf49588388abdb#539bbc84085efb609f4f62cb03cf49588388abdb" + +[[package]] +name = "p3-util" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=539bbc84085efb609f4f62cb03cf49588388abdb#539bbc84085efb609f4f62cb03cf49588388abdb" +dependencies = [ + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pest" +version = "2.8.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" +dependencies = [ + "memchr", + "ucd-trie", +] + +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros", + "phf_shared", + "serde", +] + +[[package]] +name = "phf_generator" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" +dependencies = [ + "fastrand", + "phf_shared", +] + +[[package]] +name = "phf_macros" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "serde", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = 
"0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", + "serde", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "reth-chainspec" +version = "1.8.2" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-genesis", + "alloy-primitives", + "alloy-trie", + "auto_impl", + "derive_more", + "reth-ethereum-forks", + "reth-network-peers", + "reth-primitives-traits", + "serde_json", +] + +[[package]] +name = "reth-codecs" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-trie", + "bytes", + "modular-bitfield", + "op-alloy-consensus", + "reth-codecs-derive", + "reth-zstd-compressors", + "serde", +] + +[[package]] +name = "reth-codecs-derive" +version = "1.8.2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "reth-db-models" +version = "1.8.2" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "reth-primitives-traits", +] + +[[package]] +name = "reth-ethereum-forks" +version = "1.8.2" +dependencies = [ + "alloy-eip2124", + "alloy-hardforks", + "alloy-primitives", + "auto_impl", + "once_cell", +] + +[[package]] +name = "reth-ethereum-primitives" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", + "reth-codecs", + "reth-primitives-traits", + "serde", + "serde_with", +] + +[[package]] +name = "reth-evm" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-primitives", + "auto_impl", + "derive_more", + "futures-util", + "reth-execution-errors", + "reth-execution-types", + "reth-primitives-traits", + "reth-storage-api", + "reth-storage-errors", + "reth-trie-common", + "revm", + "scroll-alloy-evm", +] + +[[package]] +name = "reth-evm-ethereum" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-primitives", + "alloy-rpc-types-engine", + "reth-chainspec", + "reth-ethereum-forks", + "reth-ethereum-primitives", + "reth-evm", + "reth-execution-types", + "reth-primitives-traits", + "reth-storage-errors", + "revm", +] + +[[package]] +name = "reth-execution-errors" +version = "1.8.2" +dependencies = [ + "alloy-evm", + "alloy-primitives", + "alloy-rlp", + "nybbles", + "reth-storage-errors", + "thiserror", +] + +[[package]] +name = "reth-execution-types" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + 
"alloy-eips", + "alloy-evm", + "alloy-primitives", + "derive_more", + "reth-ethereum-primitives", + "reth-primitives-traits", + "reth-trie-common", + "revm", +] + +[[package]] +name = "reth-network-peers" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde_with", + "thiserror", + "url", +] + +[[package]] +name = "reth-primitives" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "once_cell", + "reth-ethereum-forks", + "reth-ethereum-primitives", + "reth-primitives-traits", + "reth-static-file-types", +] + +[[package]] +name = "reth-primitives-traits" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-trie", + "auto_impl", + "bytes", + "derive_more", + "once_cell", + "op-alloy-consensus", + "reth-codecs", + "revm-bytecode", + "revm-primitives", + "revm-state", + "scroll-alloy-consensus", + "secp256k1", + "serde", + "serde_with", + "thiserror", +] + +[[package]] +name = "reth-prune-types" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "derive_more", + "thiserror", +] + +[[package]] +name = "reth-scroll-chainspec" +version = "1.8.2" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-serde", + "auto_impl", + "derive_more", + "once_cell", + "reth-chainspec", + "reth-ethereum-forks", + "reth-network-peers", + "reth-primitives-traits", + "reth-scroll-forks", + "reth-trie-common", + "scroll-alloy-hardforks", + "serde", + "serde_json", +] + +[[package]] +name = "reth-scroll-evm" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-primitives", + "alloy-rpc-types-engine", + "derive_more", + "reth-chainspec", + "reth-evm", + "reth-execution-types", + "reth-primitives", + "reth-primitives-traits", + "reth-scroll-chainspec", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-api", + "revm", + "revm-primitives", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-hardforks", + "thiserror", + "tracing", +] + +[[package]] +name = "reth-scroll-forks" +version = "1.8.2" +dependencies = [ + "alloy-chains", + "alloy-primitives", + "auto_impl", + "once_cell", + "reth-ethereum-forks", + "scroll-alloy-hardforks", +] + +[[package]] +name = "reth-scroll-primitives" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "bytes", + "once_cell", + "reth-codecs", + "reth-primitives-traits", + "scroll-alloy-consensus", + "serde", +] + +[[package]] +name = "reth-stages-types" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "reth-trie-common", +] + +[[package]] +name = "reth-static-file-types" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "derive_more", + "serde", + "strum", +] + +[[package]] +name = "reth-storage-api" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rpc-types-engine", + "auto_impl", + "reth-chainspec", + "reth-db-models", + "reth-ethereum-primitives", + "reth-execution-types", + "reth-primitives-traits", + "reth-prune-types", + "reth-stages-types", + "reth-storage-errors", + "reth-trie-common", + "revm-database", +] + +[[package]] +name = "reth-storage-errors" +version = "1.8.2" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "derive_more", + "reth-primitives-traits", + "reth-prune-types", + 
"reth-static-file-types", + "revm-database-interface", + "thiserror", +] + +[[package]] +name = "reth-trie" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "auto_impl", + "itertools 0.14.0", + "reth-execution-errors", + "reth-primitives-traits", + "reth-stages-types", + "reth-storage-errors", + "reth-trie-common", + "reth-trie-sparse", + "revm-database", + "tracing", +] + +[[package]] +name = "reth-trie-common" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "derive_more", + "itertools 0.14.0", + "nybbles", + "reth-primitives-traits", + "revm-database", +] + +[[package]] +name = "reth-trie-sparse" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "auto_impl", + "reth-execution-errors", + "reth-primitives-traits", + "reth-trie-common", + "smallvec", + "tracing", +] + +[[package]] +name = "reth-zstd-compressors" +version = "1.8.2" +dependencies = [ + "zstd", +] + +[[package]] +name = "revm" +version = "30.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "revm-bytecode", + "revm-context", + "revm-context-interface", + "revm-database", + "revm-database-interface", + "revm-handler", + "revm-inspector", + "revm-interpreter", + "revm-precompile", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-bytecode" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "bitvec", + "phf", + "revm-primitives", + "serde", +] + +[[package]] +name = "revm-context" +version = "10.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "bitvec", + "cfg-if", + "derive-where", + "revm-bytecode", + "revm-context-interface", + "revm-database-interface", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-context-interface" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "auto_impl", + "either", + "revm-database-interface", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-database" +version = "9.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "revm-bytecode", + "revm-database-interface", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-database-interface" +version = "8.0.2" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "auto_impl", + "either", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-handler" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "auto_impl", + "derive-where", + "revm-bytecode", + "revm-context", + "revm-context-interface", + "revm-database-interface", + "revm-interpreter", + "revm-precompile", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-inspector" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "auto_impl", + "either", + "revm-context", + 
"revm-database-interface", + "revm-handler", + "revm-interpreter", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-interpreter" +version = "27.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "revm-bytecode", + "revm-context-interface", + "revm-primitives", + "revm-state", +] + +[[package]] +name = "revm-precompile" +version = "28.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "ark-bls12-381", + "ark-bn254", + "ark-ec", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "arrayref", + "aurora-engine-modexp", + "cfg-if", + "k256", + "p256", + "revm-primitives", + "ripemd", + "sha2", +] + +[[package]] +name = "revm-primitives" +version = "21.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "alloy-primitives", + "num_enum", + "once_cell", + "serde", +] + +[[package]] +name = "revm-scroll" +version = "0.1.0" +source = "git+https://github.com/scroll-tech/scroll-revm?tag=scroll-v91#a1ac004adf0019d9926defc4e31e6a76a7e558f7" +dependencies = [ + "auto_impl", + "enumn", + "once_cell", + "revm", + "revm-inspector", + "revm-primitives", +] + +[[package]] +name = "revm-state" +version = "8.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" +dependencies = [ + "bitflags", + "revm-bytecode", + "revm-primitives", + "serde", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "ruint" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "ark-ff 0.5.0", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp", + "ruint-macro", + "serde_core", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.27", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rusty-fork" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "scroll-alloy-consensus" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more", + "reth-codecs", + "serde", + "serde_with", +] + +[[package]] +name = "scroll-alloy-evm" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-primitives", + "auto_impl", + "revm", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-hardforks", +] + +[[package]] +name = "scroll-alloy-hardforks" +version = "1.8.2" +dependencies = [ + "alloy-hardforks", + "auto_impl", +] + +[[package]] +name = "scroll-alloy-rpc-types" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "derive_more", + "scroll-alloy-consensus", + "serde", + "serde_json", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_with" +version = "3.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.12.0", + "schemars 0.9.0", + "schemars 1.1.0", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros 0.27.2", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.110", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "synstructure" +version = "0.13.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap 2.12.0", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] 
+name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.110", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +]
diff --git reth/crates/scroll/openvm-compat/Cargo.toml scroll-reth/crates/scroll/openvm-compat/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4cf2cf3044ea4227219f0964440adaab42bc11cb --- /dev/null +++ scroll-reth/crates/scroll/openvm-compat/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "openvm-compat" +version = "0.0.1" +edition = "2024" + +[workspace] + +[dependencies] +openvm = { git = "https://github.com/openvm-org/openvm.git", tag = "v1.4.0", default-features = false } + +reth-chainspec = { path = "../../chainspec", default-features = false } +reth-evm = { path = "../../evm/evm", default-features = false } +reth-evm-ethereum = { path = "../../ethereum/evm", default-features = false } +reth-ethereum-forks = { path = "../../ethereum/hardforks", default-features = false } +reth-execution-types = { path = "../../evm/execution-types", default-features = false } +reth-primitives = { path = "../../primitives", default-features = false } +reth-primitives-traits = { path = "../../primitives-traits", default-features = false } +reth-storage-errors = { path = "../../storage/errors", default-features = false } +reth-trie = { path = "../../trie/trie", default-features = false } +reth-trie-sparse = { path = "../../trie/sparse", default-features = false } + +reth-scroll-chainspec = { path = "../chainspec", default-features = false } +reth-scroll-evm = { path = "../evm", default-features = false } +reth-scroll-forks = { path = "../hardforks", default-features = false } +reth-scroll-primitives = { path = "../primitives", default-features = false } + +scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false } +scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } + +[patch.crates-io] +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
diff --git reth/crates/scroll/openvm-compat/src/main.rs scroll-reth/crates/scroll/openvm-compat/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..8fd9e8ae9c79267a45b20339a75ce2ed5de8f199 --- /dev/null +++ scroll-reth/crates/scroll/openvm-compat/src/main.rs @@ -0,0 +1,9 @@ +//! `OpenVM` compatibility bin. + +#![no_main] +#![no_std] + +openvm::entry!(main); + +#[allow(dead_code)] +const fn main() {}
diff --git reth/crates/scroll/payload/Cargo.toml scroll-reth/crates/scroll/payload/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c3eeb840f0be4f378f91a19acde2ea1c0ad74348 --- /dev/null +++ scroll-reth/crates/scroll/payload/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "reth-scroll-payload" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# alloy +alloy-rlp.workspace = true +alloy-consensus.workspace = true +alloy-primitives.workspace = true + +# scroll-alloy +reth-scroll-evm.workspace = true +scroll-alloy-hardforks.workspace = true + +# revm +revm.workspace = true + +# reth +reth-basic-payload-builder.workspace = true +reth-chainspec.workspace = true +reth-chain-state.workspace = true +reth-evm.workspace = true +reth-execution-types.workspace = true +reth-payload-builder.workspace = true +reth-payload-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-revm.workspace = true +reth-storage-api.workspace = true +reth-transaction-pool.workspace = true +reth-payload-util.workspace = true + +# scroll +reth-scroll-chainspec.workspace = true +reth-scroll-primitives.workspace = true +reth-scroll-engine-primitives.workspace = true + +# misc +futures-util = { workspace = true, optional = true } +thiserror.workspace = true +tracing.workspace = true + +[features] +test-utils = [ + "dep:futures-util", + "reth-payload-builder/test-utils", + "reth-primitives-traits/test-utils", + "reth-transaction-pool/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-revm/test-utils", +]
diff --git reth/crates/scroll/payload/src/builder.rs scroll-reth/crates/scroll/payload/src/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..1022ab80aad1d2a2a32316506d75c964d502f480 --- /dev/null +++ scroll-reth/crates/scroll/payload/src/builder.rs @@ -0,0 +1,612 @@ +//! Scroll's payload builder implementation. + +use super::ScrollPayloadBuilderError; +use crate::config::{PayloadBuildingBreaker, ScrollBuilderConfig}; + +use alloy_consensus::{Transaction, Typed2718}; +use alloy_primitives::U256; +use alloy_rlp::Encodable; +use core::fmt::Debug; +use reth_basic_payload_builder::{ + is_better_payload, BuildArguments, BuildOutcome, BuildOutcomeKind, MissingPayloadBehaviour, + PayloadBuilder, PayloadConfig, +}; +use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_evm::{ + block::{BlockExecutionError, BlockValidationError}, + execute::{BlockBuilder, BlockBuilderOutcome, ProviderError}, + ConfigureEvm, Database, Evm, +}; +use reth_execution_types::ExecutionOutcome; +use reth_payload_builder::PayloadId; +use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction, TxTy}; +use reth_revm::{cancelled::CancelOnDrop, database::StateProviderDatabase, db::State}; +use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; +use reth_scroll_engine_primitives::{ScrollBuiltPayload, ScrollPayloadBuilderAttributes}; +use reth_scroll_evm::{ScrollBaseFeeProvider, ScrollNextBlockEnvAttributes}; +use reth_scroll_primitives::{ScrollPrimitives, ScrollTransactionSigned}; +use reth_storage_api::{BaseFeeProvider, StateProvider, StateProviderFactory}; +use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; +use revm::context::Block; +use scroll_alloy_hardforks::ScrollHardforks; +use std::{boxed::Box, sync::Arc, vec, vec::Vec}; + +/// A type that returns the [`PayloadTransactions`] that should be included in the pool. +pub trait ScrollPayloadTransactions<Transaction>: Clone + Send + Sync + Unpin + 'static { + /// Returns an iterator that yields the transaction in the order they should get included in the + /// new payload. + fn best_transactions<Pool: TransactionPool<Transaction = Transaction>>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions<Transaction = Transaction>; +} + +impl<T: PoolTransaction> ScrollPayloadTransactions<T> for () { + fn best_transactions<Pool: TransactionPool<Transaction = T>>( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions<Transaction = T> { + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) + } +} + +/// Scroll's payload builder. +#[derive(Clone, Debug)] +pub struct ScrollPayloadBuilder<Pool, Client, Evm, Txs = ()> { + /// The type responsible for creating the evm. + pub evm_config: Evm, + /// Transaction pool. + pub pool: Pool, + /// Node client + pub client: Client, + /// The type responsible for yielding the best transactions to include in a payload. + pub best_transactions: Txs, + /// Payload builder configuration. + pub builder_config: ScrollBuilderConfig, +} + +impl<Pool, Evm, Client> ScrollPayloadBuilder<Pool, Client, Evm> { + /// Creates a new [`ScrollPayloadBuilder`]. 
+ pub const fn new( + pool: Pool, + evm_config: Evm, + client: Client, + builder_config: ScrollBuilderConfig, + ) -> Self { + Self { evm_config, pool, client, best_transactions: (), builder_config } + } +} + +impl<Pool, Client, Evm, Txs> ScrollPayloadBuilder<Pool, Client, Evm, Txs> { + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. + pub fn with_transactions<T>( + self, + best_transactions: T, + ) -> ScrollPayloadBuilder<Pool, Client, Evm, T> { + let Self { evm_config, pool, client, builder_config, .. } = self; + ScrollPayloadBuilder { evm_config, pool, client, best_transactions, builder_config } + } +} + +impl<Pool, Client, Evm, T> ScrollPayloadBuilder<Pool, Client, Evm, T> +where + Pool: TransactionPool<Transaction: PoolTransaction<Consensus = ScrollTransactionSigned>>, + Client: StateProviderFactory + + ChainSpecProvider< + ChainSpec: EthChainSpec + + ScrollHardforks + + ChainConfig<Config = ScrollChainConfig> + + Clone, + >, + Evm: + ConfigureEvm<Primitives = ScrollPrimitives, NextBlockEnvCtx = ScrollNextBlockEnvAttributes>, +{ + /// Constructs a Scroll payload from the transactions sent via the + /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in + /// the payload attributes, the transaction pool will be ignored and the only transactions + /// included in the payload will be those sent through the attributes. + /// + /// Given build arguments including a Scroll client, transaction pool, + /// and configuration, this function creates a transaction payload. Returns + /// a result indicating success with the payload or an error in case of failure. + fn build_payload<'a, Txs>( + &self, + args: BuildArguments<ScrollPayloadBuilderAttributes, ScrollBuiltPayload>, + best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, + ) -> Result<BuildOutcome<ScrollBuiltPayload>, PayloadBuilderError> + where + Txs: PayloadTransactions<Transaction: PoolTransaction<Consensus = ScrollTransactionSigned>>, + { + let BuildArguments { mut cached_reads, config, cancel, best_payload } = args; + + let ctx = ScrollPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: self.client.chain_spec(), + config, + cancel, + best_payload, + }; + + let builder = ScrollBuilder::new(best); + + let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(&state_provider); + + if ctx.attributes().no_tx_pool { + builder.build(state, &state_provider, ctx, &self.builder_config) + } else { + // sequencer mode we can reuse cachedreads from previous runs + builder.build(cached_reads.as_db_mut(state), &state_provider, ctx, &self.builder_config) + } + .map(|out| out.with_cached_reads(cached_reads)) + } +} + +/// Implementation of the [`PayloadBuilder`] trait for [`ScrollPayloadBuilder`]. 
+impl<Pool, Client, Evm, Txs> PayloadBuilder for ScrollPayloadBuilder<Pool, Client, Evm, Txs> +where + Client: StateProviderFactory + + ChainSpecProvider< + ChainSpec: EthChainSpec + + ScrollHardforks + + ChainConfig<Config = ScrollChainConfig> + + Clone, + > + Clone, + Pool: TransactionPool<Transaction: PoolTransaction<Consensus = ScrollTransactionSigned>>, + Evm: + ConfigureEvm<Primitives = ScrollPrimitives, NextBlockEnvCtx = ScrollNextBlockEnvAttributes>, + Txs: ScrollPayloadTransactions<Pool::Transaction>, +{ + type Attributes = ScrollPayloadBuilderAttributes; + type BuiltPayload = ScrollBuiltPayload; + + fn try_build( + &self, + args: BuildArguments<Self::Attributes, Self::BuiltPayload>, + ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> { + let pool = self.pool.clone(); + self.build_payload(args, |attrs| self.best_transactions.best_transactions(pool, attrs)) + } + + fn on_missing_payload( + &self, + _args: BuildArguments<Self::Attributes, Self::BuiltPayload>, + ) -> MissingPayloadBehaviour<Self::BuiltPayload> { + // we want to await the job that's already in progress because that should be returned as + // is, there's no benefit in racing another job + MissingPayloadBehaviour::AwaitInProgress + } + + // NOTE: this should only be used for testing purposes because this doesn't have access to L1 + // system txs, hence on_missing_payload we return [MissingPayloadBehaviour::AwaitInProgress]. + fn build_empty_payload( + &self, + config: PayloadConfig<Self::Attributes>, + ) -> Result<Self::BuiltPayload, PayloadBuilderError> { + let args = BuildArguments { + config, + cached_reads: Default::default(), + cancel: Default::default(), + best_payload: None, + }; + self.build_payload(args, |_| NoopPayloadTransactions::<Pool::Transaction>::default())? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) + } +} + +/// A builder for a new payload. +pub struct ScrollBuilder<'a, Txs> { + /// Yields the best transaction to include if transactions from the mempool are allowed. + best: Box<dyn FnOnce(BestTransactionsAttributes) -> Txs + 'a>, +} + +impl<'a, Txs> ScrollBuilder<'a, Txs> { + /// Creates a new [`ScrollBuilder`]. + pub fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self { + Self { best: Box::new(best) } + } +} + +impl<'a, Txs> std::fmt::Debug for ScrollBuilder<'a, Txs> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ScrollBuilder").finish() + } +} + +impl<Txs> ScrollBuilder<'_, Txs> { + /// Builds the payload on top of the state. 
+ pub fn build<EvmConfig, ChainSpec>( + self, + db: impl Database<Error = ProviderError>, + state_provider: impl StateProvider, + ctx: ScrollPayloadBuilderCtx<EvmConfig, ChainSpec>, + builder_config: &ScrollBuilderConfig, + ) -> Result<BuildOutcomeKind<ScrollBuiltPayload>, PayloadBuilderError> + where + EvmConfig: ConfigureEvm< + Primitives = ScrollPrimitives, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, + ChainSpec: EthChainSpec + ScrollHardforks + ChainConfig<Config = ScrollChainConfig> + Clone, + Txs: PayloadTransactions<Transaction: PoolTransaction<Consensus = ScrollTransactionSigned>>, + { + let Self { best } = self; + tracing::debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + let breaker = builder_config.breaker(); + + let mut db = State::builder().with_database(db).with_bundle_update().build(); + + let mut builder = ctx.block_builder(&mut db, builder_config)?; + + // 1. apply pre-execution changes + builder.apply_pre_execution_changes().map_err(|err| { + tracing::warn!(target: "payload_builder", %err, "failed to apply pre-execution changes"); + PayloadBuilderError::Internal(err.into()) + })?; + + // 2. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(&mut builder)?; + + // 3. if mem pool transactions are requested we execute them + if !ctx.attributes().no_tx_pool { + let best_txs = best(ctx.best_transaction_attributes(builder.evm_mut().block())); + if ctx + .execute_best_transactions( + &mut info, + &mut builder, + best_txs, + builder_config, + breaker, + )? + .is_some() + { + return Ok(BuildOutcomeKind::Cancelled); + } + + // check if the new payload is even more valuable + if !ctx.is_better_payload(info.total_fees) { + // can skip building the block + return Ok(BuildOutcomeKind::Aborted { fees: info.total_fees }) + } + } + + let BlockBuilderOutcome { execution_result, hashed_state, trie_updates, mut block } = + builder.finish(state_provider)?; + + // set the block fields using the hints from the payload attributes. 
+ let (mut scroll_block, senders) = block.split(); + scroll_block = scroll_block.map_header(|mut header| { + if let Some(extra_data) = &ctx.config.attributes.block_data_hint.extra_data { + header.extra_data = extra_data.clone(); + } + if let Some(state_root) = ctx.config.attributes.block_data_hint.state_root { + header.state_root = state_root; + } + if let Some(coinbase) = ctx.config.attributes.block_data_hint.coinbase { + header.beneficiary = coinbase; + } + if let Some(nonce) = ctx.config.attributes.block_data_hint.nonce { + header.nonce = nonce.into() + } + if let Some(difficulty) = ctx.config.attributes.block_data_hint.difficulty { + header.difficulty = difficulty; + } + header + }); + block = RecoveredBlock::new_unhashed(scroll_block, senders); + + let sealed_block = Arc::new(block.sealed_block().clone()); + tracing::debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block"); + + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![execution_result.receipts], + block.number, + Vec::new(), + ); + + // create the executed block data + let executed: ExecutedBlockWithTrieUpdates<ScrollPrimitives> = + ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + }, + trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), + }; + + let no_tx_pool = ctx.attributes().no_tx_pool; + + let payload = ScrollBuiltPayload::new( + ctx.payload_id(), + sealed_block, + Some(executed), + info.total_fees, + ); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. + Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) + } + } +} + +/// Container type that holds all necessities to build a new payload. +#[derive(Debug)] +pub struct ScrollPayloadBuilderCtx<Evm: ConfigureEvm, ChainSpec> { + /// The type that knows how to perform system calls and configure the evm. + pub evm_config: Evm, + /// The chainspec + pub chain_spec: ChainSpec, + /// How to build the payload. + pub config: PayloadConfig<ScrollPayloadBuilderAttributes>, + /// Marker to check whether the job has been cancelled. + pub cancel: CancelOnDrop, + /// The currently best payload. + pub best_payload: Option<ScrollBuiltPayload>, +} + +impl<Evm, ChainSpec> ScrollPayloadBuilderCtx<Evm, ChainSpec> +where + Evm: + ConfigureEvm<Primitives = ScrollPrimitives, NextBlockEnvCtx = ScrollNextBlockEnvAttributes>, + ChainSpec: EthChainSpec + ScrollHardforks + ChainConfig<Config = ScrollChainConfig> + Clone, +{ + /// Returns the parent block the payload will be build on. + #[allow(clippy::missing_const_for_fn)] + pub fn parent(&self) -> &SealedHeader { + &self.config.parent_header + } + + /// Returns the builder attributes. + pub const fn attributes(&self) -> &ScrollPayloadBuilderAttributes { + &self.config.attributes + } + + /// Returns the current fee settings for transactions from the mempool + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { + BestTransactionsAttributes::new( + block_env.basefee(), + block_env.blob_gasprice().map(|p| p as u64), + ) + } + + /// Returns the unique id for this payload job. 
+ pub fn payload_id(&self) -> PayloadId { + self.attributes().payload_id() + } + + /// Returns true if the fees are higher than the previous payload. + pub fn is_better_payload(&self, total_fees: U256) -> bool { + is_better_payload(self.best_payload.as_ref(), total_fees) + } + + /// Prepares a [`BlockBuilder`] for the next block. + pub fn block_builder<'a, DB: Database>( + &'a self, + db: &'a mut State<DB>, + builder_config: &ScrollBuilderConfig, + ) -> Result<impl BlockBuilder<Primitives = Evm::Primitives> + 'a, PayloadBuilderError> { + // get the base fee for the attributes. + let base_fee_provider = ScrollBaseFeeProvider::new(self.chain_spec.clone()); + let base_fee: u64 = base_fee_provider + .next_block_base_fee(db, self.parent().header(), self.attributes().timestamp()) + .map_err(|err| PayloadBuilderError::Other(Box::new(err)))?; + + self.evm_config + .builder_for_next_block( + db, + self.parent(), + ScrollNextBlockEnvAttributes { + timestamp: self.attributes().timestamp(), + suggested_fee_recipient: self.attributes().suggested_fee_recipient(), + gas_limit: self + .attributes() + .gas_limit + .unwrap_or_else(|| builder_config.gas_limit.unwrap_or_default()), + base_fee, + }, + ) + .map_err(PayloadBuilderError::other) + } +} + +impl<Evm, ChainSpec> ScrollPayloadBuilderCtx<Evm, ChainSpec> +where + Evm: + ConfigureEvm<Primitives = ScrollPrimitives, NextBlockEnvCtx = ScrollNextBlockEnvAttributes>, + ChainSpec: EthChainSpec + ScrollHardforks + ChainConfig<Config = ScrollChainConfig> + Clone, +{ + /// Executes all sequencer transactions that are included in the payload attributes. + pub fn execute_sequencer_transactions( + &self, + builder: &mut impl BlockBuilder<Primitives = Evm::Primitives>, + ) -> Result<ExecutionInfo, PayloadBuilderError> { + let mut info = ExecutionInfo::new(); + let block_gas_limit = builder.evm().block().gas_limit(); + let mut gas_spent_by_transactions = Vec::new(); + + for sequencer_tx in &self.attributes().transactions { + // A sequencer's block should never contain blob transactions. + if sequencer_tx.value().is_eip4844() { + return Err(PayloadBuilderError::other( + ScrollPayloadBuilderError::BlobTransactionRejected, + )) + } + // Convert the transaction to a [RecoveredTx]. This is + // purely for the purposes of utilizing the `evm_config.tx_env`` function. + // Deposit transactions do not have signatures, so if the tx is a deposit, this + // will just pull in its `from` address. + let sequencer_tx = sequencer_tx.value().try_clone_into_recovered().map_err(|_| { + PayloadBuilderError::other(ScrollPayloadBuilderError::TransactionEcRecoverFailed) + })?; + + let tx_gas = sequencer_tx.gas_limit(); + // check we don't go over the block gas limit + if info.cumulative_gas_used + tx_gas > block_gas_limit { + gas_spent_by_transactions.push(tx_gas); + return Err(PayloadBuilderError::other( + ScrollPayloadBuilderError::BlockGasLimitExceededBySequencerTransactions { + gas_spent_by_tx: gas_spent_by_transactions, + gas: block_gas_limit, + }, + )); + } + + let gas_used = match builder.execute_transaction(sequencer_tx.clone()) { + Ok(gas_used) => gas_used, + Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx { + error, + .. 
+ })) => { + tracing::trace!(target: "payload_builder", %error, ?sequencer_tx, "Error in sequencer transaction, skipping."); + continue + } + Err(err) => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))) + } + }; + + // unspent gas is not refunded and not reallocated to other transactions for L1 + // messages. + let gas_used = + if sequencer_tx.is_l1_message() { sequencer_tx.gas_limit() } else { gas_used }; + + // add gas used by the transaction to cumulative gas used + info.cumulative_gas_used += gas_used; + gas_spent_by_transactions.push(gas_used); + } + + Ok(info) + } + + /// Executes the given best transactions and updates the execution info. + /// + /// Returns `Ok(Some(())` if the job was cancelled. + pub fn execute_best_transactions( + &self, + info: &mut ExecutionInfo, + builder: &mut impl BlockBuilder<Primitives = Evm::Primitives>, + mut best_txs: impl PayloadTransactions< + Transaction: PoolTransaction<Consensus = TxTy<Evm::Primitives>>, + >, + builder_config: &ScrollBuilderConfig, + breaker: PayloadBuildingBreaker, + ) -> Result<Option<()>, PayloadBuilderError> { + let block_gas_limit = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); + + while let Some(tx) = best_txs.next(()) { + let tx = tx.into_consensus(); + if info.is_tx_over_limits(tx.inner(), block_gas_limit, builder_config.max_da_block_size) + { + // we can't fit this transaction into the block, so we need to mark it as + // invalid which also removes all dependent transaction from + // the iterator before we can continue + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue + } + + // A sequencer's block should never contain blob or deposit transactions from the pool. + if tx.is_eip4844() || tx.is_l1_message() { + best_txs.mark_invalid(tx.signer(), tx.nonce()); + continue + } + + // check if the job was cancelled, if so we can exit early + if self.cancel.is_cancelled() { + return Ok(Some(())) + } + + // check if the execution needs to be halted. + if breaker.should_break(info.cumulative_gas_used, info.cumulative_da_bytes_used) { + tracing::trace!(target: "scroll::payload_builder", ?info, "breaking execution loop"); + return Ok(None); + } + + let gas_used = match builder.execute_transaction(tx.clone()) { + Ok(gas_used) => gas_used, + Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx { + error, + .. 
+ })) => { + if error.is_nonce_too_low() { + // if the nonce is too low, we can skip this transaction + tracing::trace!(target: "payload_builder", %error, ?tx, "skipping nonce too low transaction"); + } else { + // if the transaction is invalid, we can skip it and all of its + // descendants + tracing::trace!(target: "payload_builder", %error, ?tx, "skipping invalid transaction and its descendants"); + best_txs.mark_invalid(tx.signer(), tx.nonce()); + } + continue + } + Err(err) => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(Box::new(err))) + } + }; + + // add gas used by the transaction to cumulative gas used, before creating the + // receipt + info.cumulative_gas_used += gas_used; + info.cumulative_da_bytes_used += tx.length() as u64; + + // update add to total fees + let miner_fee = tx + .effective_tip_per_gas(base_fee) + .expect("fee is always valid; execution succeeded"); + info.total_fees += U256::from(miner_fee) * U256::from(gas_used); + } + + Ok(None) + } +} + +/// This acts as the container for executed transactions and its byproducts (receipts, gas used) +#[derive(Default, Debug)] +pub struct ExecutionInfo { + /// All gas used so far + pub cumulative_gas_used: u64, + /// Estimated DA size + pub cumulative_da_bytes_used: u64, + /// Tracks fees from executed mempool transactions + pub total_fees: U256, +} + +impl ExecutionInfo { + /// Create a new instance with allocated slots. + pub const fn new() -> Self { + Self { cumulative_gas_used: 0, cumulative_da_bytes_used: 0, total_fees: U256::ZERO } + } + + /// Returns true if the transaction would exceed the block limits: + /// - block gas limit: ensures the transaction still fits into the block. + pub fn is_tx_over_limits( + &self, + tx: &(impl Encodable + Transaction), + block_gas_limit: u64, + block_data_limit: Option<u64>, + ) -> bool { + if block_data_limit + .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx.length() as u64 > da_limit) + { + return true; + } + + self.cumulative_gas_used + tx.gas_limit() > block_gas_limit + } +}
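The transaction selection loop above is governed by two accounting rules: a transaction is skipped if it would overflow the block gas limit or the optional data-availability byte budget, and an L1 message is charged its full gas limit because unspent gas is neither refunded nor reallocated. The following is a minimal standalone sketch of both rules, using simplified stand-in types rather than the reth traits (in the builder itself, only pool transactions contribute to the DA counter):

```rust
// Sketch only: simplified stand-ins for the pool transaction and `ExecutionInfo`
// types used by the builder above.
struct SketchTx {
    gas_limit: u64,
    encoded_len: u64, // RLP-encoded size, used as the DA-size proxy
    is_l1_message: bool,
}

#[derive(Default)]
struct SketchInfo {
    cumulative_gas_used: u64,
    cumulative_da_bytes_used: u64,
}

impl SketchInfo {
    /// Mirrors `ExecutionInfo::is_tx_over_limits`: skip a transaction that would
    /// overflow the block gas limit or the optional DA byte budget.
    fn is_tx_over_limits(&self, tx: &SketchTx, gas_limit: u64, da_limit: Option<u64>) -> bool {
        let over_da = da_limit
            .is_some_and(|limit| self.cumulative_da_bytes_used + tx.encoded_len > limit);
        over_da || self.cumulative_gas_used + tx.gas_limit > gas_limit
    }

    /// Mirrors the sequencer-transaction accounting: an L1 message is charged its
    /// full gas limit, since unspent gas is neither refunded nor reallocated.
    fn record_gas(&mut self, tx: &SketchTx, gas_used: u64) {
        let charged = if tx.is_l1_message { tx.gas_limit } else { gas_used };
        self.cumulative_gas_used += charged;
    }
}

fn main() {
    let mut info = SketchInfo::default();

    // An L1 message from the payload attributes: the full gas limit is charged.
    let l1_msg = SketchTx { gas_limit: 100_000, encoded_len: 200, is_l1_message: true };
    info.record_gas(&l1_msg, 21_000);
    assert_eq!(info.cumulative_gas_used, 100_000);

    // A pool transaction: checked against both budgets; DA bytes only tracked here.
    let pool_tx = SketchTx { gas_limit: 50_000, encoded_len: 500, is_l1_message: false };
    assert!(!info.is_tx_over_limits(&pool_tx, 30_000_000, Some(120_000)));
    info.record_gas(&pool_tx, 30_000);
    info.cumulative_da_bytes_used += pool_tx.encoded_len;
    assert_eq!(info.cumulative_gas_used, 130_000);
}
```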
diff --git reth/crates/scroll/payload/src/config.rs scroll-reth/crates/scroll/payload/src/config.rs new file mode 100644 index 0000000000000000000000000000000000000000..63993b60b9b40dd3cb2081c7b1b2eaf74f8b6ee4 --- /dev/null +++ scroll-reth/crates/scroll/payload/src/config.rs @@ -0,0 +1,131 @@ +//! Configuration for the payload builder. + +use core::time::Duration; +use reth_chainspec::MIN_TRANSACTION_GAS; +use std::{fmt::Debug, time::Instant}; + +/// Settings for the Scroll builder. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ScrollBuilderConfig { + /// Gas limit. + pub gas_limit: Option<u64>, + /// Time limit for payload building. + pub time_limit: Duration, + /// Maximum total data availability size for a block. + pub max_da_block_size: Option<u64>, +} + +/// Minimal data bytes size per transaction. +pub const MIN_TRANSACTION_DATA_SIZE: u64 = 115u64; + +impl ScrollBuilderConfig { + /// Returns a new instance of [`ScrollBuilderConfig`]. + pub const fn new( + gas_limit: Option<u64>, + time_limit: Duration, + max_da_block_size: Option<u64>, + ) -> Self { + Self { gas_limit, time_limit, max_da_block_size } + } + + /// Returns the [`PayloadBuildingBreaker`] for the config. + pub(super) fn breaker(&self) -> PayloadBuildingBreaker { + PayloadBuildingBreaker::new(self.time_limit, self.gas_limit, self.max_da_block_size) + } +} + +/// Used in the [`super::ScrollPayloadBuilder`] to exit the transactions execution loop early. +#[derive(Debug, Clone)] +pub struct PayloadBuildingBreaker { + start: Instant, + time_limit: Duration, + gas_limit: Option<u64>, + max_da_block_size: Option<u64>, +} + +impl PayloadBuildingBreaker { + /// Returns a new instance of the [`PayloadBuildingBreaker`]. + fn new(time_limit: Duration, gas_limit: Option<u64>, max_da_block_size: Option<u64>) -> Self { + Self { start: Instant::now(), time_limit, gas_limit, max_da_block_size } + } + + /// Returns whether the payload building should stop. 
+ pub(super) fn should_break( + &self, + cumulative_gas_used: u64, + cumulative_da_size_used: u64, + ) -> bool { + // Check time limit + if self.start.elapsed() >= self.time_limit { + return true; + } + + // Check gas limit if configured + if let Some(gas_limit) = self.gas_limit && + cumulative_gas_used > gas_limit.saturating_sub(MIN_TRANSACTION_GAS) + { + return true; + } + + // Check data availability size limit if configured + if let Some(max_size) = self.max_da_block_size && + cumulative_da_size_used > max_size.saturating_sub(MIN_TRANSACTION_DATA_SIZE) + { + return true; + } + + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_should_break_on_time_limit() { + let breaker = PayloadBuildingBreaker::new( + Duration::from_millis(200), + Some(2 * MIN_TRANSACTION_GAS), + Some(2 * MIN_TRANSACTION_DATA_SIZE), + ); + assert!(!breaker.should_break(MIN_TRANSACTION_GAS, MIN_TRANSACTION_DATA_SIZE)); + std::thread::sleep(Duration::from_millis(201)); + assert!(breaker.should_break(MIN_TRANSACTION_GAS, MIN_TRANSACTION_DATA_SIZE)); + } + + #[test] + fn test_should_break_on_gas_limit() { + let breaker = PayloadBuildingBreaker::new( + Duration::from_secs(1), + Some(2 * MIN_TRANSACTION_GAS), + Some(2 * MIN_TRANSACTION_DATA_SIZE), + ); + assert!(!breaker.should_break(MIN_TRANSACTION_GAS, MIN_TRANSACTION_DATA_SIZE)); + assert!(breaker.should_break(MIN_TRANSACTION_GAS + 1, MIN_TRANSACTION_DATA_SIZE)); + } + + #[test] + fn test_should_break_on_data_size_limit() { + let breaker = PayloadBuildingBreaker::new( + Duration::from_secs(1), + Some(2 * MIN_TRANSACTION_GAS), + Some(2 * MIN_TRANSACTION_DATA_SIZE), + ); + assert!(!breaker.should_break(MIN_TRANSACTION_GAS, MIN_TRANSACTION_DATA_SIZE)); + assert!(breaker.should_break(MIN_TRANSACTION_GAS, MIN_TRANSACTION_DATA_SIZE + 1)); + } + + #[test] + fn test_should_break_with_no_da_limit() { + let breaker = PayloadBuildingBreaker::new( + Duration::from_secs(1), + Some(2 * MIN_TRANSACTION_GAS), + None, // No DA limit + ); + // Should not break on large DA size when no limit is set + assert!(!breaker.should_break(MIN_TRANSACTION_GAS, u64::MAX)); + // But should still break on gas limit + assert!(breaker.should_break(MIN_TRANSACTION_GAS + 1, u64::MAX)); + } +}
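The breaker stops a build attempt once the deadline passes, or once the remaining gas or DA headroom cannot fit even a minimal transaction, which is why the counters are compared against `saturating_sub(MIN_TRANSACTION_GAS)` and `saturating_sub(MIN_TRANSACTION_DATA_SIZE)`. A standalone sketch of that rule with the constants inlined (21_000 for reth's `MIN_TRANSACTION_GAS` is an assumption; 115 is the value defined above):

```rust
use std::time::{Duration, Instant};

// Constants inlined for the sketch; treat the gas value as an assumption.
const MIN_TX_GAS: u64 = 21_000;
const MIN_TX_DATA: u64 = 115;

/// Mirrors `PayloadBuildingBreaker::should_break`: stop once the deadline passed,
/// or once the remaining gas/DA headroom cannot fit even a minimal transaction.
fn should_break(
    start: Instant,
    time_limit: Duration,
    gas_limit: Option<u64>,
    da_limit: Option<u64>,
    gas_used: u64,
    da_used: u64,
) -> bool {
    start.elapsed() >= time_limit
        || gas_limit.is_some_and(|g| gas_used > g.saturating_sub(MIN_TX_GAS))
        || da_limit.is_some_and(|d| da_used > d.saturating_sub(MIN_TX_DATA))
}

fn main() {
    let start = Instant::now();
    let limit = Duration::from_secs(1);
    // With a 100_000 gas cap, building continues while at least 21_000 gas remains.
    assert!(!should_break(start, limit, Some(100_000), None, 79_000, 0));
    assert!(should_break(start, limit, Some(100_000), None, 79_001, 0));
}
```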
diff --git reth/crates/scroll/payload/src/error.rs scroll-reth/crates/scroll/payload/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..6477cb1c6a6404bc8d7d1f193f4a0cbc412465a7 --- /dev/null +++ scroll-reth/crates/scroll/payload/src/error.rs @@ -0,0 +1,19 @@ +/// Scroll specific payload building errors. +#[derive(Debug, thiserror::Error)] +pub enum ScrollPayloadBuilderError { + /// Thrown when a transaction fails to convert to a + /// [`alloy_consensus::transaction::Recovered`]. + #[error("failed to convert deposit transaction to RecoveredTx")] + TransactionEcRecoverFailed, + /// Thrown when a blob transaction is included in a sequencer's block. + #[error("blob transaction included in sequencer block")] + BlobTransactionRejected, + /// Thrown when sequencer transaction gas limit exceeds remaining block gas. + #[error("Sequencer transactions over gas limit: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")] + BlockGasLimitExceededBySequencerTransactions { + /// The gas used by each transaction in the block. + gas_spent_by_tx: Vec<u64>, + /// The block gas limit. + gas: u64, + }, +}
diff --git reth/crates/scroll/payload/src/lib.rs scroll-reth/crates/scroll/payload/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e3ab637e8b14425d7a4d479bfba7b548bdd1d29 --- /dev/null +++ scroll-reth/crates/scroll/payload/src/lib.rs @@ -0,0 +1,16 @@ +//! Engine Payload related types. + +pub mod builder; +pub use builder::{ScrollPayloadBuilder, ScrollPayloadTransactions}; + +pub mod config; +pub use config::ScrollBuilderConfig; + +mod error; +pub use error::ScrollPayloadBuilderError; + +#[cfg(feature = "test-utils")] +mod test_utils; + +#[cfg(feature = "test-utils")] +pub use test_utils::{NoopPayloadJob, NoopPayloadJobGenerator};
diff --git reth/crates/scroll/payload/src/test_utils.rs scroll-reth/crates/scroll/payload/src/test_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..a9d494f40fdaeef1faa719cff26319224f974cf9 --- /dev/null +++ scroll-reth/crates/scroll/payload/src/test_utils.rs @@ -0,0 +1,70 @@ +use core::{ + fmt::Debug, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadJob, PayloadJobGenerator}; +use reth_payload_primitives::{ + BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, +}; + +/// A [`PayloadJobGenerator`] that doesn't produce any useful payload. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopPayloadJobGenerator<PA, BP> { + _types: core::marker::PhantomData<(PA, BP)>, +} + +impl<PA, BP> PayloadJobGenerator for NoopPayloadJobGenerator<PA, BP> +where + PA: PayloadBuilderAttributes + Default + Debug + Send + Sync, + BP: BuiltPayload + Default + Clone + Debug + Send + Sync + 'static, +{ + type Job = NoopPayloadJob<PA, BP>; + + fn new_payload_job(&self, _attr: PA) -> Result<Self::Job, PayloadBuilderError> { + Ok(NoopPayloadJob::<PA, BP>::default()) + } +} + +/// A [`PayloadJobGenerator`] that doesn't produce any payload. +#[derive(Debug, Default)] +pub struct NoopPayloadJob<PA, BP> { + _types: core::marker::PhantomData<(PA, BP)>, +} + +impl<PA, BP> Future for NoopPayloadJob<PA, BP> { + type Output = Result<(), PayloadBuilderError>; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { + Poll::Pending + } +} + +impl<PA, BP> PayloadJob for NoopPayloadJob<PA, BP> +where + PA: PayloadBuilderAttributes + Default + Debug, + BP: BuiltPayload + Default + Clone + Debug + 'static, +{ + type PayloadAttributes = PA; + type ResolvePayloadFuture = + futures_util::future::Ready<Result<Self::BuiltPayload, PayloadBuilderError>>; + type BuiltPayload = BP; + + fn best_payload(&self) -> Result<Self::BuiltPayload, PayloadBuilderError> { + Ok(Self::BuiltPayload::default()) + } + + fn payload_attributes(&self) -> Result<Self::PayloadAttributes, PayloadBuilderError> { + Ok(Self::PayloadAttributes::default()) + } + + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + let fut = futures_util::future::ready(self.best_payload()); + (fut, KeepPayloadJobAlive::No) + } +}
diff --git reth/crates/scroll/primitives/Cargo.toml scroll-reth/crates/scroll/primitives/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..be21143bb6ab19bf055b7c8a2934681f4a8ff74c --- /dev/null +++ scroll-reth/crates/scroll/primitives/Cargo.toml @@ -0,0 +1,94 @@ +[package] +name = "reth-scroll-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-codecs = { workspace = true, optional = true } +reth-primitives-traits = { workspace = true, features = ["scroll-alloy-traits"] } +reth-zstd-compressors = { workspace = true, optional = true } + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true + +# scroll +scroll-alloy-consensus.workspace = true + +# codec +bytes = { workspace = true, optional = true } +modular-bitfield = { workspace = true, optional = true } +serde = { workspace = true, optional = true } + +# misc +once_cell.workspace = true + +# test +arbitrary = { workspace = true, features = ["derive"], optional = true } + +[dev-dependencies] +reth-codecs = { workspace = true, features = ["test-utils"] } +rstest.workspace = true +rand.workspace = true + +[features] +default = ["std"] +std = [ + "serde?/std", + "scroll-alloy-consensus/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-rlp/std", + "bytes?/std", + "reth-primitives-traits/std", + "reth-zstd-compressors?/std", + "reth-codecs?/std", + "once_cell/std", + "serde?/std", +] +reth-codec = [ + "dep:reth-codecs", + "std", + "reth-primitives-traits/reth-codec", + "scroll-alloy-consensus/reth-codec", + "dep:bytes", + "dep:modular-bitfield", + "dep:reth-zstd-compressors", +] +serde = [ + "dep:serde", + "scroll-alloy-consensus/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes?/serde", + "reth-codecs?/serde", + "reth-primitives-traits/serde", + "rand/serde", +] +serde-bincode-compat = [ + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "scroll-alloy-consensus/serde-bincode-compat", +] +arbitrary = [ + "dep:arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs?/arbitrary", + "reth-primitives-traits/arbitrary", + "scroll-alloy-consensus/arbitrary", +]
diff --git reth/crates/scroll/primitives/src/lib.rs scroll-reth/crates/scroll/primitives/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..55e2d12ccbd974452c25a1e9a247b56491e148c1 --- /dev/null +++ scroll-reth/crates/scroll/primitives/src/lib.rs @@ -0,0 +1,38 @@ +//! Commonly used types in Scroll. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/scroll-tech/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +use once_cell as _; + +pub mod transaction; +pub use transaction::{tx_type::ScrollTxType, ScrollTransactionSigned}; + +use reth_primitives_traits::Block; + +mod receipt; +pub use receipt::ScrollReceipt; + +/// Scroll-specific block type. +pub type ScrollBlock = alloy_consensus::Block<ScrollTransactionSigned>; + +/// Scroll-specific block body type. +pub type ScrollBlockBody = <ScrollBlock as Block>::Body; + +/// Primitive types for Scroll Node. +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct ScrollPrimitives; + +impl reth_primitives_traits::NodePrimitives for ScrollPrimitives { + type Block = ScrollBlock; + type BlockHeader = alloy_consensus::Header; + type BlockBody = ScrollBlockBody; + type SignedTx = ScrollTransactionSigned; + type Receipt = ScrollReceipt; +}
diff --git reth/crates/scroll/primitives/src/receipt.rs scroll-reth/crates/scroll/primitives/src/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..6e214b310897601a5249cd13b2bd67aa45acc71d --- /dev/null +++ scroll-reth/crates/scroll/primitives/src/receipt.rs @@ -0,0 +1,441 @@ +use alloy_consensus::{ + proofs::ordered_trie_root_with_encoder, Eip2718EncodableReceipt, Eip658Value, Receipt, + ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, +}; +use alloy_eips::{ + eip2718::{Eip2718Result, Encodable2718}, + Decodable2718, +}; +use alloy_primitives::{Bloom, Log, B256, U256}; +use alloy_rlp::{BufMut, Decodable, Encodable, Header}; +use reth_primitives_traits::InMemorySize; +use scroll_alloy_consensus::{ScrollTransactionReceipt, ScrollTxType}; + +/// Typed ethereum transaction receipt. +/// Receipt containing result of transaction execution. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ScrollReceipt { + /// Legacy receipt + Legacy(ScrollTransactionReceipt), + /// EIP-2930 receipt + Eip2930(ScrollTransactionReceipt), + /// EIP-1559 receipt + Eip1559(ScrollTransactionReceipt), + /// EIP-7702 receipt + Eip7702(ScrollTransactionReceipt), + /// L1 message receipt + L1Message(Receipt), +} + +impl ScrollReceipt { + /// Returns [`ScrollTxType`] of the receipt. + pub const fn tx_type(&self) -> ScrollTxType { + match self { + Self::Legacy(_) => ScrollTxType::Legacy, + Self::Eip2930(_) => ScrollTxType::Eip2930, + Self::Eip1559(_) => ScrollTxType::Eip1559, + Self::Eip7702(_) => ScrollTxType::Eip7702, + Self::L1Message(_) => ScrollTxType::L1Message, + } + } + + /// Returns inner [`Receipt`], + pub const fn as_receipt(&self) -> &Receipt { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => &receipt.inner, + Self::L1Message(receipt) => receipt, + } + } + + /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom), + Self::L1Message(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom), + } + } + + /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out), + Self::L1Message(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out), + } + } + + /// Returns RLP header for inner encoding. + pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } + } + + /// Returns RLP header for inner encoding without bloom. + pub fn rlp_header_inner_without_bloom(&self) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length_without_bloom() } + } + + /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or + /// network header. 
+ pub fn rlp_decode_inner( + buf: &mut &[u8], + tx_type: ScrollTxType, + ) -> alloy_rlp::Result<ReceiptWithBloom<Self>> { + match tx_type { + ScrollTxType::Legacy => { + let ReceiptWithBloom { receipt, logs_bloom } = + RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; + Ok(ReceiptWithBloom { receipt: Self::Legacy(receipt), logs_bloom }) + } + ScrollTxType::Eip2930 => { + let ReceiptWithBloom { receipt, logs_bloom } = + RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; + Ok(ReceiptWithBloom { receipt: Self::Eip2930(receipt), logs_bloom }) + } + ScrollTxType::Eip1559 => { + let ReceiptWithBloom { receipt, logs_bloom } = + RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; + Ok(ReceiptWithBloom { receipt: Self::Eip1559(receipt), logs_bloom }) + } + ScrollTxType::Eip7702 => { + let ReceiptWithBloom { receipt, logs_bloom } = + RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; + Ok(ReceiptWithBloom { receipt: Self::Eip7702(receipt), logs_bloom }) + } + ScrollTxType::L1Message => { + let ReceiptWithBloom { receipt, logs_bloom } = + RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; + Ok(ReceiptWithBloom { receipt: Self::L1Message(receipt), logs_bloom }) + } + } + } + + /// RLP-encodes receipt fields without an RLP header. + pub fn rlp_encode_fields_without_bloom(&self, out: &mut dyn BufMut) { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => { + receipt.inner.status.encode(out); + receipt.inner.cumulative_gas_used.encode(out); + receipt.inner.logs.encode(out); + } + Self::L1Message(receipt) => { + receipt.status.encode(out); + receipt.cumulative_gas_used.encode(out); + receipt.logs.encode(out); + } + } + } + + /// Returns length of RLP-encoded receipt fields without an RLP header. + pub fn rlp_encoded_fields_length_without_bloom(&self) -> usize { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => { + receipt.inner.status.length() + + receipt.inner.cumulative_gas_used.length() + + receipt.inner.logs.length() + } + Self::L1Message(receipt) => { + receipt.status.length() + + receipt.cumulative_gas_used.length() + + receipt.logs.length() + } + } + } + + /// RLP-decodes the receipt from the provided buffer without bloom. + pub fn rlp_decode_inner_without_bloom( + buf: &mut &[u8], + tx_type: ScrollTxType, + ) -> alloy_rlp::Result<Self> { + let header = Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + + let remaining = buf.len(); + let status = Decodable::decode(buf)?; + let cumulative_gas_used = Decodable::decode(buf)?; + let logs = Decodable::decode(buf)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + let inner = Receipt { status, cumulative_gas_used, logs }; + + match tx_type { + ScrollTxType::Legacy => { + Ok(Self::Legacy(ScrollTransactionReceipt { inner, l1_fee: Default::default() })) + } + ScrollTxType::Eip2930 => { + Ok(Self::Eip2930(ScrollTransactionReceipt { inner, l1_fee: Default::default() })) + } + ScrollTxType::Eip1559 => { + Ok(Self::Eip1559(ScrollTransactionReceipt { inner, l1_fee: Default::default() })) + } + ScrollTxType::Eip7702 => { + Ok(Self::Eip7702(ScrollTransactionReceipt { inner, l1_fee: Default::default() })) + } + ScrollTxType::L1Message => Ok(Self::L1Message(inner)), + } + } + + /// Returns the l1 fee for the transaction receipt. 
+ pub const fn l1_fee(&self) -> U256 { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => receipt.l1_fee, + Self::L1Message(_) => U256::ZERO, + } + } + + /// Calculates the receipt root for a header for the reference type of [Receipt]. + /// + /// NOTE: Prefer `proofs::calculate_receipt_root` if you have log blooms memoized. + pub fn calculate_receipt_root_no_memo(receipts: &[Self]) -> B256 { + ordered_trie_root_with_encoder(receipts, |r, buf| r.with_bloom_ref().encode_2718(buf)) + } +} + +impl Eip2718EncodableReceipt for ScrollReceipt { + fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + !self.tx_type().is_legacy() as usize + self.rlp_header_inner(bloom).length_with_payload() + } + + fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !self.tx_type().is_legacy() { + out.put_u8(self.tx_type() as u8); + } + self.rlp_header_inner(bloom).encode(out); + self.rlp_encode_fields(bloom, out); + } +} + +impl RlpEncodableReceipt for ScrollReceipt { + fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + let mut len = self.eip2718_encoded_length_with_bloom(bloom); + if !self.tx_type().is_legacy() { + len += Header { + list: false, + payload_length: self.eip2718_encoded_length_with_bloom(bloom), + } + .length(); + } + + len + } + + fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !self.tx_type().is_legacy() { + Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } + .encode(out); + } + self.eip2718_encode_with_bloom(bloom, out); + } +} + +impl RlpDecodableReceipt for ScrollReceipt { + fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result<ReceiptWithBloom<Self>> { + let header_buf = &mut &**buf; + let header = Header::decode(header_buf)?; + + // Legacy receipt, reuse initial buffer without advancing + if header.list { + return Self::rlp_decode_inner(buf, ScrollTxType::Legacy) + } + + // Otherwise, advance the buffer and try decoding type flag followed by receipt + *buf = *header_buf; + + let remaining = buf.len(); + let tx_type = ScrollTxType::decode(buf)?; + let this = Self::rlp_decode_inner(buf, tx_type)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(this) + } +} + +impl Encodable2718 for ScrollReceipt { + fn encode_2718_len(&self) -> usize { + !self.tx_type().is_legacy() as usize + + self.rlp_header_inner_without_bloom().length_with_payload() + } + + fn encode_2718(&self, out: &mut dyn BufMut) { + if !self.tx_type().is_legacy() { + out.put_u8(self.tx_type() as u8); + } + self.rlp_header_inner_without_bloom().encode(out); + self.rlp_encode_fields_without_bloom(out); + } +} + +impl Decodable2718 for ScrollReceipt { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { + Ok(Self::rlp_decode_inner_without_bloom(buf, ScrollTxType::try_from(ty)?)?) + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> { + Ok(Self::rlp_decode_inner_without_bloom(buf, ScrollTxType::Legacy)?) + } +} + +impl Encodable for ScrollReceipt { + fn encode(&self, out: &mut dyn BufMut) { + self.network_encode(out); + } + + fn length(&self) -> usize { + self.network_len() + } +} + +impl Decodable for ScrollReceipt { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> { + Ok(Self::network_decode(buf)?) 
+ } +} + +impl TxReceipt for ScrollReceipt { + type Log = Log; + + fn status_or_post_state(&self) -> Eip658Value { + self.as_receipt().status_or_post_state() + } + + fn status(&self) -> bool { + self.as_receipt().status() + } + + fn bloom(&self) -> Bloom { + self.as_receipt().bloom() + } + + fn cumulative_gas_used(&self) -> u64 { + self.as_receipt().cumulative_gas_used() + } + + fn logs(&self) -> &[Log] { + self.as_receipt().logs() + } +} + +impl Typed2718 for ScrollReceipt { + fn ty(&self) -> u8 { + self.tx_type().into() + } +} + +impl InMemorySize for ScrollReceipt { + fn size(&self) -> usize { + self.as_receipt().size() + } +} + +#[cfg(feature = "serde-bincode-compat")] +impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for ScrollReceipt { + type BincodeRepr<'a> = Self; + + fn as_repr(&self) -> Self::BincodeRepr<'_> { + self.clone() + } + + fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { + repr + } +} + +#[cfg(feature = "reth-codec")] +mod compact { + use super::*; + use alloy_primitives::U256; + use reth_codecs::Compact; + use std::borrow::Cow; + + #[derive(reth_codecs::CompactZstd)] + #[reth_zstd( + compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, + decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR + )] + struct CompactScrollReceipt<'a> { + tx_type: ScrollTxType, + success: bool, + cumulative_gas_used: u64, + #[allow(clippy::owned_cow)] + logs: Cow<'a, Vec<Log>>, + l1_fee: Option<U256>, + } + + impl<'a> From<&'a ScrollReceipt> for CompactScrollReceipt<'a> { + fn from(receipt: &'a ScrollReceipt) -> Self { + Self { + tx_type: receipt.tx_type(), + success: receipt.status(), + cumulative_gas_used: receipt.cumulative_gas_used(), + logs: Cow::Borrowed(&receipt.as_receipt().logs), + l1_fee: (receipt.l1_fee() != U256::ZERO).then_some(receipt.l1_fee()), + } + } + } + + impl From<CompactScrollReceipt<'_>> for ScrollReceipt { + fn from(receipt: CompactScrollReceipt<'_>) -> Self { + let CompactScrollReceipt { tx_type, success, cumulative_gas_used, logs, l1_fee } = + receipt; + + let inner = + Receipt { status: success.into(), cumulative_gas_used, logs: logs.into_owned() }; + + match tx_type { + ScrollTxType::Legacy => { + Self::Legacy(ScrollTransactionReceipt::new(inner, l1_fee.unwrap_or_default())) + } + ScrollTxType::Eip2930 => { + Self::Eip2930(ScrollTransactionReceipt::new(inner, l1_fee.unwrap_or_default())) + } + ScrollTxType::Eip1559 => { + Self::Eip1559(ScrollTransactionReceipt::new(inner, l1_fee.unwrap_or_default())) + } + ScrollTxType::Eip7702 => { + Self::Eip7702(ScrollTransactionReceipt::new(inner, l1_fee.unwrap_or_default())) + } + ScrollTxType::L1Message => Self::L1Message(inner), + } + } + } + + impl Compact for ScrollReceipt { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + CompactScrollReceipt::from(self).to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (receipt, buf) = CompactScrollReceipt::from_compact(buf, len); + (receipt.into(), buf) + } + } +}
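As with typed transactions, the EIP-2718 receipt encoding above prefixes every non-legacy receipt with its transaction type byte and leaves legacy receipts as a bare RLP list. A standalone sketch of that framing decision follows (type ids only; the RLP payload is a placeholder and the alloy traits are not used):

```rust
// Type ids as used by `ScrollTxType`; 0x7e is Scroll's L1 message type.
#[derive(Clone, Copy, PartialEq)]
enum SketchTxType {
    Legacy = 0x00,
    Eip2930 = 0x01,
    Eip1559 = 0x02,
    Eip7702 = 0x04,
    L1Message = 0x7e,
}

/// Mirrors `Encodable2718::encode_2718` above: non-legacy receipts get a leading
/// type byte, legacy receipts stay a bare RLP list.
fn encode_2718(ty: SketchTxType, rlp_payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(1 + rlp_payload.len());
    if ty != SketchTxType::Legacy {
        out.push(ty as u8);
    }
    out.extend_from_slice(rlp_payload);
    out
}

fn main() {
    // Placeholder RLP list, not a real encoded receipt.
    let payload = vec![0xc3, 0x01, 0x80, 0xc0];
    assert_eq!(encode_2718(SketchTxType::Legacy, &payload)[0], 0xc3);
    assert_eq!(encode_2718(SketchTxType::L1Message, &payload)[..2], [0x7e, 0xc3]);
}
```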
diff --git reth/crates/scroll/primitives/src/transaction/mod.rs scroll-reth/crates/scroll/primitives/src/transaction/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..87471e507e9013fa44fb153d98f5553d1bc74e19 --- /dev/null +++ scroll-reth/crates/scroll/primitives/src/transaction/mod.rs @@ -0,0 +1,6 @@ +//! Scroll primitives transaction types. + +pub mod tx_type; + +/// Signed transaction. +pub type ScrollTransactionSigned = scroll_alloy_consensus::ScrollTxEnvelope;
diff --git reth/crates/scroll/primitives/src/transaction/tx_type.rs scroll-reth/crates/scroll/primitives/src/transaction/tx_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..e549b81bd97a09f15f1b00dc99f1f2a34cb292b0 --- /dev/null +++ scroll-reth/crates/scroll/primitives/src/transaction/tx_type.rs @@ -0,0 +1,47 @@ +//! Scroll transaction type. + +pub use scroll_alloy_consensus::ScrollTxType; + +#[cfg(test)] +mod tests { + use super::*; + use reth_codecs::{txtype::*, Compact}; + use rstest::rstest; + use scroll_alloy_consensus::L1_MESSAGE_TX_TYPE_ID; + + #[rstest] + #[case(ScrollTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(ScrollTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(ScrollTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(ScrollTxType::L1Message, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![L1_MESSAGE_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: ScrollTxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec<u8>, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!( + identifier, expected_identifier, + "Unexpected identifier for ScrollTxType {tx_type:?}", + ); + assert_eq!(buf, expected_buf, "Unexpected buffer for ScrollTxType {tx_type:?}",); + } + + #[rstest] + #[case(ScrollTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(ScrollTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(ScrollTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(ScrollTxType::L1Message, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![L1_MESSAGE_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: ScrollTxType, + #[case] identifier: usize, + #[case] buf: Vec<u8>, + ) { + let (actual_type, remaining_buf) = ScrollTxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } +}
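The tests above exercise reth's `Compact` identifier scheme: the three base transaction types fit entirely in the two-bit identifier, while Scroll's L1 message type uses the extended flag and additionally writes its full type byte (0x7e) into the buffer. A standalone sketch of that scheme, with the identifier values assumed from the constant names rather than taken from `reth-codecs`:

```rust
// Identifier values assumed to be 0..=2 for the base types and 3 for the extended
// flag; 0x7e is Scroll's L1 message transaction type id.
const COMPACT_LEGACY: u8 = 0;
const COMPACT_EIP2930: u8 = 1;
const COMPACT_EIP1559: u8 = 2;
const COMPACT_EXTENDED_FLAG: u8 = 3;
const L1_MESSAGE_TX_TYPE_ID: u8 = 0x7e;

/// Mirrors the behaviour exercised by the tests above: base types fit entirely in
/// the two-bit identifier, extended types also write their full type byte.
fn tx_type_to_compact(tx_type: u8, buf: &mut Vec<u8>) -> u8 {
    match tx_type {
        0x00 => COMPACT_LEGACY,
        0x01 => COMPACT_EIP2930,
        0x02 => COMPACT_EIP1559,
        other => {
            buf.push(other);
            COMPACT_EXTENDED_FLAG
        }
    }
}

fn main() {
    let mut buf = Vec::new();
    assert_eq!(tx_type_to_compact(0x02, &mut buf), COMPACT_EIP1559);
    assert!(buf.is_empty()); // base types write nothing into the buffer
    assert_eq!(tx_type_to_compact(L1_MESSAGE_TX_TYPE_ID, &mut buf), COMPACT_EXTENDED_FLAG);
    assert_eq!(buf, vec![L1_MESSAGE_TX_TYPE_ID]); // extended types append the type byte
}
```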
diff --git reth/crates/scroll/rpc/Cargo.toml scroll-reth/crates/scroll/rpc/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ea97f595b00a283f71670d9027a29e30026c4aa8 --- /dev/null +++ scroll-reth/crates/scroll/rpc/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "reth-scroll-rpc" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum RPC implementation for scroll." + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-evm.workspace = true +reth-primitives-traits.workspace = true +reth-provider.workspace = true +reth-rpc-eth-api = { workspace = true, features = ["scroll"] } +reth-rpc-eth-types.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true +reth-rpc = { workspace = true, features = ["scroll"] } +reth-rpc-convert = { workspace = true, features = ["scroll"] } +reth-node-api.workspace = true +reth-node-builder.workspace = true + +# scroll +reth-scroll-chainspec.workspace = true +reth-scroll-evm.workspace = true +reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } +scroll-alloy-consensus.workspace = true +scroll-alloy-hardforks.workspace = true +scroll-alloy-network.workspace = true +scroll-alloy-rpc-types.workspace = true + +# ethereum +alloy-primitives.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true +revm.workspace = true +alloy-transport.workspace = true +alloy-json-rpc.workspace = true +alloy-rpc-client.workspace = true +alloy-transport-http.workspace = true + +# reqwest +reqwest = { workspace = true, default-features = false, features = ["rustls-tls-native-roots"] } + +# tracing +tracing.workspace = true + +# async +tokio.workspace = true + +# rpc +jsonrpsee-types.workspace = true + +# misc +eyre.workspace = true +thiserror.workspace = true + +[features] +js-tracer = ["reth-rpc/js-tracer"]
diff --git reth/crates/scroll/rpc/src/error.rs scroll-reth/crates/scroll/rpc/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..848f08a719e7c55442db08635533beb5231c9690 --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/error.rs @@ -0,0 +1,114 @@ +//! RPC errors specific to Scroll. + +use alloy_json_rpc::ErrorPayload; +use alloy_rpc_types_eth::BlockError; +use alloy_transport::{RpcError, TransportErrorKind}; +use jsonrpsee_types::error::INTERNAL_ERROR_CODE; +use reth_evm::execute::ProviderError; +use reth_rpc_convert::transaction::EthTxEnvError; +use reth_rpc_eth_api::{AsEthApiError, TransactionConversionError}; +use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; +use revm::context::result::{EVMError, HaltReason}; +use std::convert::Infallible; + +/// Scroll specific errors, that extend [`EthApiError`]. +#[derive(Debug, thiserror::Error)] +pub enum ScrollEthApiError { + /// L1 ethereum error. + #[error(transparent)] + Eth(#[from] EthApiError), + /// Sequencer client error. + #[error(transparent)] + Sequencer(#[from] SequencerClientError), +} + +impl AsEthApiError for ScrollEthApiError { + fn as_err(&self) -> Option<&EthApiError> { + match self { + Self::Eth(err) => Some(err), + _ => None, + } + } +} + +impl From<ScrollEthApiError> for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: ScrollEthApiError) -> Self { + match err { + ScrollEthApiError::Eth(err) => err.into(), + ScrollEthApiError::Sequencer(err) => err.into(), + } + } +} + +impl From<EthTxEnvError> for ScrollEthApiError { + fn from(value: EthTxEnvError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From<BlockError> for ScrollEthApiError { + fn from(error: BlockError) -> Self { + Self::Eth(error.into()) + } +} + +impl<T> From<EVMError<T>> for ScrollEthApiError +where + T: Into<EthApiError>, +{ + fn from(error: EVMError<T>) -> Self { + Self::Eth(error.into()) + } +} + +impl FromEvmHalt<HaltReason> for ScrollEthApiError { + fn from_evm_halt(halt: HaltReason, gas_limit: u64) -> Self { + EthApiError::from_evm_halt(halt, gas_limit).into() + } +} + +impl From<TransactionConversionError> for ScrollEthApiError { + fn from(value: TransactionConversionError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From<ProviderError> for ScrollEthApiError { + fn from(value: ProviderError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From<Infallible> for ScrollEthApiError { + fn from(value: Infallible) -> Self { + match value {} + } +} + +/// Error type when interacting with the Sequencer +#[derive(Debug, thiserror::Error)] +pub enum SequencerClientError { + /// Wrapper around an [`RpcError<TransportErrorKind>`]. + #[error(transparent)] + HttpError(#[from] RpcError<TransportErrorKind>), + /// Thrown when serializing transaction to forward to sequencer + #[error("invalid sequencer transaction")] + InvalidSequencerTransaction, +} + +impl From<SequencerClientError> for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: SequencerClientError) -> Self { + match err { + SequencerClientError::HttpError(RpcError::ErrorResp(ErrorPayload { + code, + message, + data, + })) => jsonrpsee_types::error::ErrorObject::owned(code as i32, message, data), + err => jsonrpsee_types::error::ErrorObject::owned( + INTERNAL_ERROR_CODE, + err.to_string(), + None::<String>, + ), + } + } +}
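The error conversions above keep the upstream JSON-RPC error intact when the sequencer replies with a structured error payload and otherwise collapse everything into an internal error. A reduced sketch of that mapping pattern, using plain mock types in place of the alloy transport and jsonrpsee types:

```rust
/// Mock stand-ins for `ErrorPayload` / `RpcError`; shapes are simplified for illustration.
struct MockErrorPayload {
    code: i64,
    message: String,
}

enum MockSequencerError {
    /// The sequencer answered with a structured JSON-RPC error.
    ErrorResp(MockErrorPayload),
    /// Any other failure (connection error, timeout, serialization, ...).
    Other(String),
}

/// Standard JSON-RPC "internal error" code, mirroring `INTERNAL_ERROR_CODE` above.
const MOCK_INTERNAL_ERROR_CODE: i32 = -32603;

/// Mirrors the `From<SequencerClientError> for ErrorObject` impl: preserve the code and
/// message the sequencer reported, otherwise surface a generic internal error.
fn to_rpc_error(err: MockSequencerError) -> (i32, String) {
    match err {
        MockSequencerError::ErrorResp(payload) => (payload.code as i32, payload.message),
        MockSequencerError::Other(msg) => (MOCK_INTERNAL_ERROR_CODE, msg),
    }
}
```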
diff --git reth/crates/scroll/rpc/src/eth/block.rs scroll-reth/crates/scroll/rpc/src/eth/block.rs new file mode 100644 index 0000000000000000000000000000000000000000..048944d57d4fd03ad4cd65118d84c1476d072c36 --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/block.rs @@ -0,0 +1,55 @@ +//! Loads and formats Scroll block RPC response. + +use crate::{RpcBlockHeaderMut, ScrollEthApi, ScrollEthApiError}; + +use alloy_consensus::BlockHeader; +use alloy_eips::BlockId; +use reth_provider::HeaderProvider; +use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadBlock}, + EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, +}; +use reth_rpc_eth_types::error::FromEvmError; + +impl<N, Rpc> EthBlocks for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, + <<Self as EthApiTypes>::NetworkTypes as RpcTypes>::Header: RpcBlockHeaderMut, +{ + async fn rpc_block( + &self, + block_id: BlockId, + full: bool, + ) -> Result<Option<RpcBlock<Self::NetworkTypes>>, Self::Error> + where + Self: FullEthApiTypes, + { + let Some(block) = self.recovered_block(block_id).await? else { return Ok(None) }; + + let td = self + .provider() + .header_td_by_number(block.number()) + .map_err(Self::Error::from_eth_err)?; + + let mut block = block.clone_into_rpc_block( + full.into(), + |tx, tx_info| self.tx_resp_builder().fill(tx, tx_info), + |header, size| self.tx_resp_builder().convert_header(header, size), + )?; + + *block.header.total_difficulty_mut() = td; + + Ok(Some(block)) + } +} + +impl<N, Rpc> LoadBlock for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ +}
diff --git reth/crates/scroll/rpc/src/eth/call.rs scroll-reth/crates/scroll/rpc/src/eth/call.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ea1fa40f022e8ebfb4c2e3cbb8b6aa9111853ef --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/call.rs @@ -0,0 +1,40 @@ +use crate::{ScrollEthApi, ScrollEthApiError}; + +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, EthCall}, + RpcConvert, RpcNodeCore, +}; +use reth_rpc_eth_types::error::FromEvmError; + +impl<N, Rpc> EthCall for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError, Evm = N::Evm>, +{ +} + +impl<N, Rpc> EstimateCall for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError, Evm = N::Evm>, +{ +} + +impl<N, Rpc> Call for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError, Evm = N::Evm>, +{ + #[inline] + fn call_gas_limit(&self) -> u64 { + self.inner.eth_api.gas_cap() + } + + #[inline] + fn max_simulate_blocks(&self) -> u64 { + self.inner.eth_api.max_simulate_blocks() + } +}
diff --git reth/crates/scroll/rpc/src/eth/fee.rs scroll-reth/crates/scroll/rpc/src/eth/fee.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ae87b885c1433a3b133ba70df8471d45c46384a --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/fee.rs @@ -0,0 +1,268 @@ +use crate::{ScrollEthApi, ScrollEthApiError}; + +use alloy_consensus::BlockHeader; +use alloy_eips::eip7840::BlobParams; +use alloy_primitives::{Sealable, U256}; +use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; +use reth_chainspec::EthChainSpec; +use reth_primitives_traits::BlockBody; +use reth_provider::{ + BaseFeeProvider, BlockIdReader, ChainSpecProvider, HeaderProvider, ProviderHeader, + StateProviderFactory, +}; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{ + helpers::{EthFees, LoadFee}, + FromEthApiError, RpcNodeCore, RpcNodeCoreExt, +}; +use reth_rpc_eth_types::{ + error::FromEvmError, fee_history::calculate_reward_percentiles_for_block, EthApiError, +}; +use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; +use reth_scroll_evm::ScrollBaseFeeProvider; +use scroll_alloy_hardforks::ScrollHardforks; +use std::future::Future; +use tracing::debug; + +impl<N, Rpc> EthFees for ScrollEthApi<N, Rpc> +where + Self: LoadFee< + Provider: StateProviderFactory + + ChainSpecProvider< + ChainSpec: EthChainSpec<Header = ProviderHeader<Self::Provider>> + + ScrollHardforks + + ChainConfig<Config = ScrollChainConfig>, + >, + >, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ + #[allow(clippy::manual_async_fn)] + fn fee_history( + &self, + mut block_count: u64, + mut newest_block: BlockNumberOrTag, + reward_percentiles: Option<Vec<f64>>, + ) -> impl Future<Output = Result<FeeHistory, Self::Error>> + Send { + async move { + if block_count == 0 { + return Ok(FeeHistory::default()) + } + + // ensure the given reward percentiles aren't excessive + if reward_percentiles.as_ref().map(|perc| perc.len() as u64) > + Some(self.gas_oracle().config().max_reward_percentile_count) + { + return Err(EthApiError::InvalidRewardPercentiles.into()) + } + + // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 + let max_fee_history = if reward_percentiles.is_none() { + self.gas_oracle().config().max_header_history + } else { + self.gas_oracle().config().max_block_history + }; + + if block_count > max_fee_history { + debug!( + requested = block_count, + truncated = max_fee_history, + "Sanitizing fee history block count" + ); + block_count = max_fee_history + } + + if newest_block.is_pending() { + // cap the target block since we don't have fee history for the pending block + newest_block = BlockNumberOrTag::Latest; + } + + let end_block = self + .provider() + .block_number_for_id(newest_block.into()) + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; + + // need to add 1 to the end block to get the correct (inclusive) range + let end_block_plus = end_block + 1; + // Ensure that we would not be querying outside of genesis + if end_block_plus < block_count { + block_count = end_block_plus; + } + + // If reward percentiles were specified, we + // need to validate that they are monotonically + // increasing and 0 <= p <= 100 + // Note: The types used ensure that the percentiles are never < 0 + if let Some(percentiles) = &reward_percentiles && + percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
+ { + return Err(EthApiError::InvalidRewardPercentiles.into()) + } + + // Fetch the headers and ensure we got all of them + // + // Treat a request for 1 block as a request for `newest_block..=newest_block`, + // otherwise `newest_block - 2` + // NOTE: We ensured that block count is capped + let start_block = end_block_plus - block_count; + + // Collect base fees, gas usage ratios and (optionally) reward percentile data + let mut base_fee_per_gas: Vec<u128> = Vec::new(); + let mut gas_used_ratio: Vec<f64> = Vec::new(); + + let mut base_fee_per_blob_gas: Vec<u128> = Vec::new(); + let mut blob_gas_used_ratio: Vec<f64> = Vec::new(); + + let mut rewards: Vec<Vec<u128>> = Vec::new(); + + let chain_spec = self.provider().chain_spec(); + let base_fee_provider = ScrollBaseFeeProvider::new(chain_spec.clone()); + + // Check if the requested range is within the cache bounds + let fee_entries = self.fee_history_cache().get_history(start_block, end_block).await; + + if let Some(fee_entries) = fee_entries { + if fee_entries.len() != block_count as usize { + return Err(EthApiError::InvalidBlockRange.into()) + } + + for entry in &fee_entries { + base_fee_per_gas + .push(entry.header.base_fee_per_gas().unwrap_or_default() as u128); + gas_used_ratio.push(entry.gas_used_ratio); + base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default()); + blob_gas_used_ratio.push(entry.blob_gas_used_ratio); + + if let Some(percentiles) = &reward_percentiles { + let mut block_rewards = Vec::with_capacity(percentiles.len()); + for &percentile in percentiles { + block_rewards.push(self.approximate_percentile(entry, percentile)); + } + rewards.push(block_rewards); + } + } + let last_entry = fee_entries.last().expect("is not empty"); + + // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the + // next block + let mut provider = self + .provider() + .state_by_block_id(last_entry.header.hash_slow().into()) + .map_err(Into::<EthApiError>::into)?; + base_fee_per_gas.push( + base_fee_provider + .next_block_base_fee( + &mut provider, + &last_entry.header, + last_entry.header.timestamp(), + ) + .map_err(Into::<EthApiError>::into)? as u128, + ); + + base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); + } else { + // read the requested header range + let headers = self.provider() + .sealed_headers_range(start_block..=end_block) + .map_err(Self::Error::from_eth_err)?; + if headers.len() != block_count as usize { + return Err(EthApiError::InvalidBlockRange.into()) + } + + for header in &headers { + base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); + gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); + + let blob_params = chain_spec + .blob_params_at_timestamp(header.timestamp()) + .unwrap_or_else(BlobParams::cancun); + + base_fee_per_blob_gas.push(header.blob_fee(blob_params).unwrap_or_default()); + blob_gas_used_ratio.push( + header.blob_gas_used().unwrap_or_default() as f64 + / blob_params.max_blob_gas_per_block() as f64, + ); + + // Percentiles were specified, so we need to collect reward percentile info + if let Some(percentiles) = &reward_percentiles { + let (block, receipts) = self.cache() + .get_block_and_receipts(header.hash()) + .await + .map_err(Self::Error::from_eth_err)? 
+ .ok_or(EthApiError::InvalidBlockRange)?; + rewards.push( + calculate_reward_percentiles_for_block( + percentiles, + header.gas_used(), + header.base_fee_per_gas().unwrap_or_default(), + block.body().transactions(), + &receipts, + ) + .unwrap_or_default(), + ); + } + } + + // The spec states that `base_fee_per_gas` "[..] includes the next block after the + // newest of the returned range, because this value can be derived from the + // newest block" + // + // The unwrap is safe since we checked earlier that we got at least 1 header. + let last_header = headers.last().expect("is present"); + let mut provider = self + .provider() + .state_by_block_id(last_header.hash().into()) + .map_err(Into::<EthApiError>::into)?; + base_fee_per_gas.push( + base_fee_provider + .next_block_base_fee( + &mut provider, + &last_header.header(), + last_header.timestamp(), + ) + .map_err(Into::<EthApiError>::into)? as u128, + ); + // Same goes for the `base_fee_per_blob_gas`: + // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block. + base_fee_per_blob_gas.push( + last_header + .maybe_next_block_blob_fee( + chain_spec.blob_params_at_timestamp(last_header.timestamp()) + ).unwrap_or_default() + ); + }; + + // Scroll-specific logic: update rewards if the newest_block is not at capacity and tip + // calculation succeeds + let (suggest_tip_cap_result, is_at_capacity) = self + .gas_oracle() + .calculate_suggest_tip_cap( + newest_block, + U256::from(self.inner.min_suggested_priority_fee), + self.inner.payload_size_limit, + ) + .await; + + let reward = match (is_at_capacity, suggest_tip_cap_result) { + (false, Ok(suggest_tip_cap_value)) => { + let suggest_tip_cap = suggest_tip_cap_value.saturating_to::<u128>(); + reward_percentiles.map(|percentiles| { + vec![vec![suggest_tip_cap; percentiles.len()]; block_count as usize] + }) + } + _ => reward_percentiles.map(|_| rewards), + }; + + Ok(FeeHistory { + base_fee_per_gas, + gas_used_ratio, + base_fee_per_blob_gas, + blob_gas_used_ratio, + oldest_block: start_block, + reward, + }) + } + } +}
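The Scroll-specific tail of `fee_history` above swaps the per-block reward percentiles for the suggested tip cap whenever the chain is not at capacity and the tip calculation succeeds; otherwise the rewards computed from the cache or headers are returned unchanged. A small sketch of just that substitution, with the gas-oracle result passed in as plain values:

```rust
/// Sketch of the reward substitution at the end of `fee_history` above. `is_at_capacity`
/// and `suggest_tip_cap` stand in for the result of `calculate_suggest_tip_cap`.
fn scroll_rewards(
    reward_percentiles: Option<Vec<f64>>,
    computed_rewards: Vec<Vec<u128>>,
    block_count: usize,
    is_at_capacity: bool,
    suggest_tip_cap: Option<u128>,
) -> Option<Vec<Vec<u128>>> {
    match (is_at_capacity, suggest_tip_cap) {
        // Blocks are not full: every requested percentile collapses to the suggested tip.
        (false, Some(tip)) => reward_percentiles
            .map(|percentiles| vec![vec![tip; percentiles.len()]; block_count]),
        // Otherwise keep the per-block percentile data computed from the cache / headers.
        _ => reward_percentiles.map(|_| computed_rewards),
    }
}
```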
diff --git reth/crates/scroll/rpc/src/eth/mod.rs scroll-reth/crates/scroll/rpc/src/eth/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ae75327a23bd05c11d3874535c576e0e0881cf6f --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/mod.rs @@ -0,0 +1,419 @@ +//! Scroll-Reth `eth_` endpoint implementation. + +use crate::{ + eth::{receipt::ScrollReceiptConverter, transaction::ScrollTxInfoMapper}, + ScrollEthApiError, SequencerClient, +}; +use alloy_primitives::U256; +use eyre::WrapErr; +pub use receipt::ScrollReceiptBuilder; +use reth_chainspec::{EthereumHardforks, Hardforks}; +use reth_evm::ConfigureEvm; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; +use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; +use reth_provider::{BlockReader, ProviderHeader, ProviderTx}; +use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest}; +use reth_rpc_eth_api::{ + helpers::{ + pending_block::BuildPendingEnv, AddDevSigners, EthApiSpec, EthState, LoadFee, + LoadPendingBlock, LoadState, SpawnBlocking, Trace, + }, + EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, +}; +use reth_rpc_eth_types::{error::FromEvmError, EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_tasks::{ + pool::{BlockingTaskGuard, BlockingTaskPool}, + TaskSpawner, +}; +use scroll_alloy_network::Scroll; +use std::{fmt, marker::PhantomData, sync::Arc}; + +mod block; +mod call; +mod fee; +mod pending_block; +pub mod receipt; +pub mod transaction; + +/// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. +pub type EthApiNodeBackend<N, Rpc> = EthApiInner<N, Rpc>; + +/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`ScrollEthApi`]. +pub trait ScrollNodeCore: RpcNodeCore<Provider: BlockReader> {} +impl<T> ScrollNodeCore for T where T: RpcNodeCore<Provider: BlockReader> {} + +/// Scroll-Reth `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// +/// This wraps a default `Eth` implementation, and provides additional functionality where the +/// scroll spec deviates from the default (ethereum) spec, e.g. transaction forwarding to the +/// receipts, additional RPC fields for transaction receipts. +/// +/// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented +/// all the `Eth` helper traits and prerequisite traits. +pub struct ScrollEthApi<N: RpcNodeCore, Rpc: RpcConvert> { + /// Gateway to node's core components. + inner: Arc<ScrollEthApiInner<N, Rpc>>, +} + +impl<N: RpcNodeCore, Rpc: RpcConvert> Clone for ScrollEthApi<N, Rpc> { + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + +impl<N: RpcNodeCore, Rpc: RpcConvert> ScrollEthApi<N, Rpc> { + /// Creates a new [`ScrollEthApi`]. + pub fn new( + eth_api: EthApiNodeBackend<N, Rpc>, + sequencer_client: Option<SequencerClient>, + min_suggested_priority_fee: U256, + payload_size_limit: u64, + propagate_local_transactions: bool, + ) -> Self { + let inner = Arc::new(ScrollEthApiInner { + eth_api, + min_suggested_priority_fee, + payload_size_limit, + sequencer_client, + propagate_local_transactions, + }); + Self { inner } + } +} + +impl<N, Rpc> ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + /// Returns a reference to the [`EthApiNodeBackend`]. 
+ pub fn eth_api(&self) -> &EthApiNodeBackend<N, Rpc> { + self.inner.eth_api() + } + + /// Returns the configured sequencer client, if any. + pub fn sequencer_client(&self) -> Option<&SequencerClient> { + self.inner.sequencer_client() + } + + /// Return a builder for the [`ScrollEthApi`]. + pub fn builder() -> ScrollEthApiBuilder { + ScrollEthApiBuilder::new() + } +} + +impl<N, Rpc> EthApiTypes for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + type Error = ScrollEthApiError; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; + + fn tx_resp_builder(&self) -> &Self::RpcConvert { + self.inner.eth_api.tx_resp_builder() + } +} + +impl<N, Rpc> RpcNodeCore for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + type Primitives = N::Primitives; + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = <N as RpcNodeCore>::Evm; + type Network = <N as RpcNodeCore>::Network; + + #[inline] + fn pool(&self) -> &Self::Pool { + self.inner.eth_api.pool() + } + + #[inline] + fn evm_config(&self) -> &Self::Evm { + self.inner.eth_api.evm_config() + } + + #[inline] + fn network(&self) -> &Self::Network { + self.inner.eth_api.network() + } + + #[inline] + fn provider(&self) -> &Self::Provider { + self.inner.eth_api.provider() + } +} + +impl<N, Rpc> RpcNodeCoreExt for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + #[inline] + fn cache(&self) -> &EthStateCache<N::Primitives> { + self.inner.eth_api.cache() + } +} + +impl<N, Rpc> EthApiSpec for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + #[inline] + fn starting_block(&self) -> U256 { + self.inner.eth_api.starting_block() + } +} + +impl<N, Rpc> SpawnBlocking for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + #[inline] + fn io_task_spawner(&self) -> impl TaskSpawner { + self.inner.eth_api.task_spawner() + } + + #[inline] + fn tracing_task_pool(&self) -> &BlockingTaskPool { + self.inner.eth_api.blocking_task_pool() + } + + #[inline] + fn tracing_task_guard(&self) -> &BlockingTaskGuard { + self.inner.eth_api.blocking_task_guard() + } +} + +impl<N, Rpc> LoadFee for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ + #[inline] + fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider> { + self.inner.eth_api.gas_oracle() + } + + #[inline] + fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> { + self.inner.eth_api.fee_history_cache() + } + + async fn suggested_priority_fee(&self) -> Result<U256, Self::Error> { + let min_tip = U256::from(self.inner.min_suggested_priority_fee); + self.inner + .eth_api + .gas_oracle() + .scroll_suggest_tip_cap(min_tip, self.inner.payload_size_limit) + .await + .map_err(Into::into) + } +} + +impl<N, Rpc> LoadState for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, + Self: LoadPendingBlock, +{ +} + +impl<N, Rpc> EthState for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, + Self: LoadPendingBlock, +{ + #[inline] + fn max_proof_window(&self) -> u64 { + self.inner.eth_api.eth_proof_window() + } +} + +impl<N, Rpc> Trace for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = 
N::Primitives, Error = ScrollEthApiError>, +{ +} + +impl<N, Rpc> AddDevSigners for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert< + Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>, + >, +{ + fn with_dev_accounts(&self) { + *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) + } +} + +impl<N: ScrollNodeCore, Rpc: RpcConvert> fmt::Debug for ScrollEthApi<N, Rpc> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ScrollEthApi").finish_non_exhaustive() + } +} + +/// Container type `ScrollEthApi` +#[allow(missing_debug_implementations)] +pub struct ScrollEthApiInner<N: ScrollNodeCore, Rpc: RpcConvert> { + /// Gateway to node's core components. + pub eth_api: EthApiNodeBackend<N, Rpc>, + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_client: Option<SequencerClient>, + /// Minimum priority fee + min_suggested_priority_fee: U256, + /// Maximum payload size + payload_size_limit: u64, + /// whether local transactions should be propagated. + propagate_local_transactions: bool, +} + +impl<N: RpcNodeCore, Rpc: RpcConvert> ScrollEthApiInner<N, Rpc> { + /// Returns a reference to the [`EthApiNodeBackend`]. + const fn eth_api(&self) -> &EthApiNodeBackend<N, Rpc> { + &self.eth_api + } + + /// Returns the configured sequencer client, if any. + const fn sequencer_client(&self) -> Option<&SequencerClient> { + self.sequencer_client.as_ref() + } +} + +/// Converter for Scroll RPC types. +pub type ScrollRpcConvert<N, NetworkT> = RpcConverter< + NetworkT, + <N as FullNodeComponents>::Evm, + ScrollReceiptConverter, + (), + ScrollTxInfoMapper<<N as FullNodeTypes>::Provider>, +>; + +/// The default suggested priority fee for the gas price oracle. +pub const DEFAULT_MIN_SUGGESTED_PRIORITY_FEE: u64 = 100; + +/// The default payload size limit in bytes for the sequencer. +pub const DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; + +/// A type that knows how to build a [`ScrollEthApi`]. +#[derive(Debug)] +pub struct ScrollEthApiBuilder<NetworkT = Scroll> { + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_url: Option<String>, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, + /// Maximum payload size + payload_size_limit: u64, + /// whether local transactions should be propagated. + propagate_local_transactions: bool, + /// Marker for network types. + _nt: PhantomData<NetworkT>, +} + +impl<NetworkT> Default for ScrollEthApiBuilder<NetworkT> { + fn default() -> Self { + Self { + sequencer_url: None, + min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, + payload_size_limit: DEFAULT_PAYLOAD_SIZE_LIMIT, + propagate_local_transactions: true, + _nt: PhantomData, + } + } +} + +impl<NetworkT> ScrollEthApiBuilder<NetworkT> { + /// Creates a [`ScrollEthApiBuilder`] instance. + pub fn new() -> Self { + Self::default() + } + + /// With a [`SequencerClient`]. 
+ pub fn with_sequencer(mut self, sequencer_url: Option<String>) -> Self { + self.sequencer_url = sequencer_url; + self + } + + /// With minimum suggested priority fee (tip) + pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { + self.min_suggested_priority_fee = min; + self + } + + /// With payload size limit + pub const fn with_payload_size_limit(mut self, limit: u64) -> Self { + self.payload_size_limit = limit; + self + } + + /// With whether local transactions should be propagated. + pub const fn with_propagate_local_transactions( + &mut self, + propagate_local_transactions: bool, + ) -> &mut Self { + self.propagate_local_transactions = propagate_local_transactions; + self + } +} + +impl<N, NetworkT> EthApiBuilder<N> for ScrollEthApiBuilder<NetworkT> +where + N: FullNodeComponents< + Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>, + Types: NodeTypes<ChainSpec: Hardforks + EthereumHardforks>, + >, + NetworkT: RpcTypes, + ScrollRpcConvert<N, NetworkT>: RpcConvert<Network = NetworkT>, + ScrollEthApi<N, ScrollRpcConvert<N, NetworkT>>: + FullEthApiServer<Provider = N::Provider, Pool = N::Pool> + AddDevSigners, +{ + type EthApi = ScrollEthApi<N, ScrollRpcConvert<N, NetworkT>>; + + async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> { + let Self { + min_suggested_priority_fee, + payload_size_limit, + sequencer_url, + propagate_local_transactions, + .. + } = self; + let rpc_converter = RpcConverter::new(ScrollReceiptConverter::default()) + .with_mapper(ScrollTxInfoMapper::new(ctx.components.provider().clone())); + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); + + let sequencer_client = if let Some(url) = sequencer_url { + Some( + SequencerClient::new(&url) + .await + .wrap_err_with(|| "Failed to init sequencer client with: {url}")?, + ) + } else { + None + }; + + Ok(ScrollEthApi::new( + eth_api, + sequencer_client, + U256::from(min_suggested_priority_fee), + payload_size_limit, + propagate_local_transactions, + )) + } +}
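For context, configuring the builder defined above might look roughly like this (the sequencer URL is a placeholder; note that `with_propagate_local_transactions` takes `&mut self`, so it is applied separately from the by-value setters):

```rust
// Sketch only: the URL is a placeholder and the node-builder plumbing that consumes the
// builder (via `EthApiBuilder::build_eth_api`) is omitted.
let mut builder: ScrollEthApiBuilder = ScrollEthApiBuilder::new()
    .with_sequencer(Some("https://sequencer.example".to_string()))
    .with_min_suggested_priority_fee(DEFAULT_MIN_SUGGESTED_PRIORITY_FEE)
    .with_payload_size_limit(DEFAULT_PAYLOAD_SIZE_LIMIT);
builder.with_propagate_local_transactions(false);
```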
diff --git reth/crates/scroll/rpc/src/eth/pending_block.rs scroll-reth/crates/scroll/rpc/src/eth/pending_block.rs new file mode 100644 index 0000000000000000000000000000000000000000..36e21f48446b513eabe109c2924de739fffe859e --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/pending_block.rs @@ -0,0 +1,29 @@ +//! Loads Scroll pending block for an RPC response. + +use crate::{ScrollEthApi, ScrollEthApiError}; +use reth_rpc_eth_api::{ + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, + RpcConvert, RpcNodeCore, +}; +use reth_rpc_eth_types::{builder::config::PendingBlockKind, error::FromEvmError, PendingBlock}; + +impl<N, Rpc> LoadPendingBlock for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + ScrollEthApiError: FromEvmError<N::Evm>, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> { + self.inner.eth_api.pending_block() + } + + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm> { + self.inner.eth_api.pending_env_builder() + } + + fn pending_block_kind(&self) -> PendingBlockKind { + self.inner.eth_api.pending_block_kind() + } +}
diff --git reth/crates/scroll/rpc/src/eth/receipt.rs scroll-reth/crates/scroll/rpc/src/eth/receipt.rs new file mode 100644 index 0000000000000000000000000000000000000000..0781fcc280071cc2de4794ac606efdbbeb0efdaa --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/receipt.rs @@ -0,0 +1,105 @@ +//! Loads and formats Scroll receipt RPC response. + +use crate::{ScrollEthApi, ScrollEthApiError}; +use alloy_consensus::{Receipt, TxReceipt}; +use alloy_rpc_types_eth::{Log, TransactionReceipt}; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::{ + transaction::{ConvertReceiptInput, ReceiptConverter}, + RpcConvert, +}; +use reth_rpc_eth_api::{helpers::LoadReceipt, RpcNodeCore}; +use reth_rpc_eth_types::receipt::build_receipt; +use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; +use scroll_alloy_consensus::ScrollReceiptEnvelope; +use scroll_alloy_rpc_types::{ScrollTransactionReceipt, ScrollTransactionReceiptFields}; +use std::fmt::Debug; + +impl<N, Rpc> LoadReceipt for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ +} + +/// Converter for Scroll receipts. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct ScrollReceiptConverter; + +impl<N> ReceiptConverter<N> for ScrollReceiptConverter +where + N: NodePrimitives<SignedTx = ScrollTransactionSigned, Receipt = ScrollReceipt>, +{ + type RpcReceipt = ScrollTransactionReceipt; + type Error = ScrollEthApiError; + + fn convert_receipts( + &self, + inputs: Vec<ConvertReceiptInput<'_, N>>, + ) -> Result<Vec<Self::RpcReceipt>, Self::Error> { + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + receipts.push(ScrollReceiptBuilder::new(input)?.build()); + } + + Ok(receipts) + } +} + +/// Builds an [`ScrollTransactionReceipt`]. +#[derive(Debug)] +pub struct ScrollReceiptBuilder { + /// Core receipt, has all the fields of an L1 receipt and is the basis for the Scroll receipt. + pub core_receipt: TransactionReceipt<ScrollReceiptEnvelope<Log>>, + /// Additional Scroll receipt fields. + pub scroll_receipt_fields: ScrollTransactionReceiptFields, +} + +impl ScrollReceiptBuilder { + /// Returns a new builder. 
+ pub fn new<N>(input: ConvertReceiptInput<'_, N>) -> Result<Self, ScrollEthApiError> + where + N: NodePrimitives<SignedTx = ScrollTransactionSigned, Receipt = ScrollReceipt>, + { + let scroll_receipt_fields = + ScrollTransactionReceiptFields { l1_fee: Some(input.receipt.l1_fee().saturating_to()) }; + let core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { + let map_logs = move |receipt: alloy_consensus::Receipt| { + let Receipt { status, cumulative_gas_used, logs } = receipt; + let logs = Log::collect_for_receipt(next_log_index, meta, logs); + Receipt { status, cumulative_gas_used, logs } + }; + match receipt { + ScrollReceipt::Legacy(receipt) => { + ScrollReceiptEnvelope::<Log>::Legacy(map_logs(receipt.inner).into_with_bloom()) + } + ScrollReceipt::Eip2930(receipt) => { + ScrollReceiptEnvelope::<Log>::Eip2930(map_logs(receipt.inner).into_with_bloom()) + } + ScrollReceipt::Eip1559(receipt) => { + ScrollReceiptEnvelope::<Log>::Eip1559(map_logs(receipt.inner).into_with_bloom()) + } + ScrollReceipt::Eip7702(receipt) => { + ScrollReceiptEnvelope::<Log>::Eip7702(map_logs(receipt.inner).into_with_bloom()) + } + ScrollReceipt::L1Message(receipt) => { + ScrollReceiptEnvelope::<Log>::L1Message(map_logs(receipt).into_with_bloom()) + } + } + }); + + Ok(Self { core_receipt, scroll_receipt_fields }) + } + + /// Builds [`ScrollTransactionReceipt`] by combing core (l1) receipt fields and additional + /// Scroll receipt fields. + pub fn build(self) -> ScrollTransactionReceipt { + let Self { core_receipt: inner, scroll_receipt_fields } = self; + + let ScrollTransactionReceiptFields { l1_fee, .. } = scroll_receipt_fields; + + ScrollTransactionReceipt { inner, l1_fee } + } +}
diff --git reth/crates/scroll/rpc/src/eth/transaction.rs scroll-reth/crates/scroll/rpc/src/eth/transaction.rs new file mode 100644 index 0000000000000000000000000000000000000000..945391a97d55478272015035dcbcf953ecf8b06f --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/eth/transaction.rs @@ -0,0 +1,143 @@ +//! Loads and formats Scroll transaction RPC response. + +use crate::{ScrollEthApi, ScrollEthApiError, SequencerClient}; +use alloy_consensus::transaction::TransactionInfo; +use alloy_primitives::{Bytes, B256}; +use reth_evm::execute::ProviderError; +use reth_provider::ReceiptProvider; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{ + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + try_into_scroll_tx_info, FromEthApiError, RpcNodeCore, TxInfoMapper, +}; +use reth_rpc_eth_types::utils::recover_raw_transaction; +use reth_scroll_primitives::ScrollReceipt; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; +use std::{ + fmt::{Debug, Formatter}, + time::Duration, +}; + +impl<N, Rpc> EthTransactions for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ + fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> { + self.inner.eth_api.signers() + } + + fn send_raw_transaction_sync_timeout(&self) -> Duration { + self.inner.eth_api.send_raw_transaction_sync_timeout() + } + + /// Decodes and recovers the transaction and submits it to the pool. + /// + /// Returns the hash of the transaction. + async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256, Self::Error> { + let recovered = recover_raw_transaction(&tx)?; + let pool_transaction = <Self::Pool as TransactionPool>::Transaction::from_pooled(recovered); + + // submit the transaction to the pool with a `Local` origin + let AddedTransactionOutcome { hash, .. } = self + .pool() + .add_transaction(TransactionOrigin::Local, pool_transaction.clone()) + .await + .map_err(Self::Error::from_eth_err)?; + + // On scroll, transactions are forwarded directly to the sequencer to be included in + // blocks that it builds. 
+ if let Some(client) = self.raw_tx_forwarder() { + tracing::debug!(target: "scroll::rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); + + if self.inner.propagate_local_transactions { + // Forward to remote sequencer RPC asynchronously (fire and forget) + let client = client.clone(); + tokio::spawn(async move { + match client.forward_raw_transaction(&tx).await { + Ok(sequencer_hash) => { + tracing::debug!(target: "scroll::rpc::eth", local_hash=%hash, %sequencer_hash, "successfully forwarded transaction to sequencer"); + } + Err(err) => { + tracing::warn!(target: "scroll::rpc::eth", %err, local_hash=%hash, "failed to forward transaction to sequencer, but transaction is in local pool and will be propagated"); + } + } + }); + } else { + // Forward to remote sequencer RPC synchronously + match client.forward_raw_transaction(&tx).await { + Ok(sequencer_hash) => { + tracing::debug!(target: "scroll::rpc::eth", local_hash=%hash, %sequencer_hash, "successfully forwarded transaction to sequencer"); + } + Err(err) => { + tracing::warn!(target: "scroll::rpc::eth", %err, local_hash=%hash, "failed to forward transaction to sequencer"); + return Err(ScrollEthApiError::Sequencer(err)); + } + } + } + } + + Ok(hash) + } +} + +impl<N, Rpc> LoadTransaction for ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>, +{ +} + +impl<N, Rpc> ScrollEthApi<N, Rpc> +where + N: RpcNodeCore, + Rpc: RpcConvert<Primitives = N::Primitives>, +{ + /// Returns the [`SequencerClient`] if one is set. + pub fn raw_tx_forwarder(&self) -> Option<SequencerClient> { + self.inner.sequencer_client.clone() + } +} + +/// Scroll implementation of [`TxInfoMapper`]. +/// +/// Receipt is fetched to extract the `l1_fee` for all transactions but L1 messages. +pub struct ScrollTxInfoMapper<Provider>(Provider); + +impl<Provider: Clone> Clone for ScrollTxInfoMapper<Provider> { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl<Provider: Debug> Debug for ScrollTxInfoMapper<Provider> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ScrollTxInfoMapper").finish() + } +} + +impl<Provider> ScrollTxInfoMapper<Provider> { + /// Creates [`ScrollTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. + pub const fn new(provider: Provider) -> Self { + Self(provider) + } +} + +impl<Provider> TxInfoMapper<ScrollTxEnvelope> for ScrollTxInfoMapper<Provider> +where + Provider: ReceiptProvider<Receipt = ScrollReceipt>, +{ + type Out = ScrollTransactionInfo; + type Err = ProviderError; + + fn try_map( + &self, + tx: &ScrollTxEnvelope, + tx_info: TransactionInfo, + ) -> Result<Self::Out, ProviderError> { + try_into_scroll_tx_info(&self.0, tx, tx_info) + } +}
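The `send_raw_transaction` override above supports two forwarding modes: when local transactions are also propagated through the pool, forwarding to the sequencer is best-effort and runs in a background task; when they are not, the call forwards synchronously and surfaces the sequencer error. A simplified sketch of that decision with a mock client (the real code uses `SequencerClient::forward_raw_transaction` and the pool-submitted hash):

```rust
/// Mock sequencer client; stands in for `SequencerClient` with simplified types.
struct MockSequencerClient;

impl MockSequencerClient {
    async fn forward_raw_transaction(&self, _tx: &[u8]) -> Result<u64, String> {
        Ok(0) // pretend the sequencer accepted the transaction
    }
}

/// Sketch of the forwarding decision: the transaction is already in the local pool in
/// both branches; only the failure handling differs.
async fn forward_to_sequencer(
    client: MockSequencerClient,
    tx: Vec<u8>,
    propagate_local_transactions: bool,
) -> Result<(), String> {
    if propagate_local_transactions {
        // Fire and forget: the pool will gossip the transaction regardless.
        tokio::spawn(async move {
            if let Err(err) = client.forward_raw_transaction(&tx).await {
                eprintln!("failed to forward transaction to sequencer: {err}");
            }
        });
        Ok(())
    } else {
        // The sequencer is the only inclusion path, so a failure is returned to the caller.
        client.forward_raw_transaction(&tx).await.map(drop)
    }
}
```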
diff --git reth/crates/scroll/rpc/src/lib.rs scroll-reth/crates/scroll/rpc/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..fb631a2caaf153e79210ed58c7110c6d9997ff8c --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/lib.rs @@ -0,0 +1,29 @@ +//! Scroll-Reth RPC support. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +pub mod error; +pub mod eth; +pub mod sequencer; + +pub use error::{ScrollEthApiError, SequencerClientError}; +pub use eth::{ScrollEthApi, ScrollReceiptBuilder}; +pub use sequencer::SequencerClient; + +/// Gives mutable access to the fields of an RPC block header. +pub trait RpcBlockHeaderMut { + /// Mutable reference to the total difficulty. + fn total_difficulty_mut(&mut self) -> &mut Option<alloy_primitives::U256>; +} + +impl RpcBlockHeaderMut for alloy_rpc_types_eth::Header { + fn total_difficulty_mut(&mut self) -> &mut Option<alloy_primitives::U256> { + &mut self.total_difficulty + } +}
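`RpcBlockHeaderMut` above is the small hook that lets the Scroll `rpc_block` implementation patch the total difficulty into an already converted RPC header. Implementing it for a hypothetical custom header type follows the same pattern (the type below is illustrative, not part of the crate):

```rust
use alloy_primitives::U256;

/// Hypothetical RPC header type used only to illustrate the trait above.
struct MyRpcHeader {
    total_difficulty: Option<U256>,
    // ...remaining header fields elided
}

impl RpcBlockHeaderMut for MyRpcHeader {
    fn total_difficulty_mut(&mut self) -> &mut Option<U256> {
        &mut self.total_difficulty
    }
}

/// Same usage as in `rpc_block`: overwrite the field in place after conversion.
fn set_total_difficulty(header: &mut impl RpcBlockHeaderMut, td: Option<U256>) {
    *header.total_difficulty_mut() = td;
}
```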
diff --git reth/crates/scroll/rpc/src/sequencer.rs scroll-reth/crates/scroll/rpc/src/sequencer.rs new file mode 100644 index 0000000000000000000000000000000000000000..637525460d4c5b2ed17a19c1ee833674db89a187 --- /dev/null +++ scroll-reth/crates/scroll/rpc/src/sequencer.rs @@ -0,0 +1,201 @@ +//! Helpers for scroll specific RPC implementations. + +use crate::SequencerClientError; +use alloy_json_rpc::{RpcRecv, RpcSend}; +use alloy_primitives::{hex, B256}; +use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; +use alloy_transport_http::Http; +use std::{str::FromStr, sync::Arc}; +use thiserror::Error; +use tracing::warn; + +/// Sequencer client error +#[derive(Error, Debug)] +pub enum Error { + /// Invalid scheme + #[error("Invalid scheme of sequencer url: {0}")] + InvalidScheme(String), + /// Invalid url + #[error("Invalid sequencer url: {0}")] + InvalidUrl(String), + /// Establishing a connection to the sequencer endpoint resulted in an error. + #[error("Failed to connect to sequencer: {0}")] + TransportError( + #[from] + #[source] + alloy_transport::TransportError, + ), + /// Reqwest failed to init client + #[error("Failed to init reqwest client for sequencer: {0}")] + ReqwestError( + #[from] + #[source] + reqwest::Error, + ), +} + +/// A client to interact with a Sequencer +#[derive(Debug, Clone)] +pub struct SequencerClient { + inner: Arc<SequencerClientInner>, +} + +impl SequencerClient { + /// Creates a new [`SequencerClient`] for the given URL. + /// + /// If the URL is a websocket endpoint we connect a websocket instance. + pub async fn new(sequencer_endpoint: impl Into<String>) -> Result<Self, Error> { + let sequencer_endpoint = sequencer_endpoint.into(); + let endpoint = BuiltInConnectionString::from_str(&sequencer_endpoint)?; + if let BuiltInConnectionString::Http(url) = endpoint { + let client = reqwest::Client::builder() + // we force use tls to prevent native issues + .use_rustls_tls() + .build()?; + Self::with_http_client(url, client) + } else { + let client = ClientBuilder::default().connect_with(endpoint).await?; + let inner = SequencerClientInner { sequencer_endpoint, client }; + Ok(Self { inner: Arc::new(inner) }) + } + } + + /// Creates a new [`SequencerClient`] with http transport with the given http client. + pub fn with_http_client( + sequencer_endpoint: impl Into<String>, + client: reqwest::Client, + ) -> Result<Self, Error> { + let sequencer_endpoint: String = sequencer_endpoint.into(); + let url = sequencer_endpoint + .parse() + .map_err(|_| Error::InvalidUrl(sequencer_endpoint.clone()))?; + + let http_client = Http::with_client(client, url); + let is_local = http_client.guess_local(); + let client = ClientBuilder::default().transport(http_client, is_local); + + let inner = SequencerClientInner { sequencer_endpoint, client }; + Ok(Self { inner: Arc::new(inner) }) + } + + /// Returns the network of the client + pub fn endpoint(&self) -> &str { + &self.inner.sequencer_endpoint + } + + /// Returns the client + pub fn client(&self) -> &Client { + &self.inner.client + } + + /// Sends a [`alloy_rpc_client::RpcCall`] request to the sequencer endpoint. 
+ async fn send_rpc_call<Params: RpcSend, Resp: RpcRecv>( + &self, + method: &str, + params: Params, + ) -> Result<Resp, SequencerClientError> { + let resp = + self.client().request::<Params, Resp>(method.to_string(), params).await.inspect_err( + |err| { + warn!( + target: "scroll::rpc::sequencer", + %err, + "HTTP request to sequencer failed", + ); + }, + )?; + Ok(resp) + } + + /// Forwards a transaction to the sequencer endpoint. + pub async fn forward_raw_transaction(&self, tx: &[u8]) -> Result<B256, SequencerClientError> { + let rlp_hex = hex::encode_prefixed(tx); + let tx_hash = + self.send_rpc_call("eth_sendRawTransaction", (rlp_hex,)).await.inspect_err(|err| { + warn!( + target: "scroll::rpc::eth", + %err, + "Failed to forward transaction to sequencer", + ); + })?; + + Ok(tx_hash) + } +} + +#[derive(Debug)] +struct SequencerClientInner { + /// The endpoint of the sequencer + sequencer_endpoint: String, + /// The client + client: Client, +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::U64; + + #[tokio::test] + async fn test_http_body_str() { + let client = SequencerClient::new("http://localhost:8545").await.unwrap(); + + let request = client + .client() + .make_request("eth_getBlockByNumber", (U64::from(10),)) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"# + ); + + let request = client + .client() + .make_request("eth_sendRawTransaction", format!("0x{}", hex::encode("abcd"))) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_sendRawTransaction","params":"0x61626364","id":1,"jsonrpc":"2.0"}"# + ); + } + + #[tokio::test] + #[ignore = "Start if WS is reachable at ws://localhost:8546"] + async fn test_ws_body_str() { + let client = SequencerClient::new("ws://localhost:8546").await.unwrap(); + + let request = client + .client() + .make_request("eth_getBlockByNumber", (U64::from(10),)) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"# + ); + + let request = client + .client() + .make_request("eth_sendRawTransaction", format!("0x{}", hex::encode("abcd"))) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_sendRawTransaction","params":"0x61626364","id":1,"jsonrpc":"2.0"}"# + ); + } +}
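A typical use of the client above, assuming a reachable sequencer endpoint (the URL is a placeholder and `raw_tx` is expected to be a signed, RLP-encoded transaction):

```rust
/// Sketch: construct a client and forward a raw transaction to the sequencer.
async fn forward(raw_tx: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    let client = SequencerClient::new("https://sequencer.example:8545").await?;
    let hash = client.forward_raw_transaction(raw_tx).await?;
    println!("forwarded to {} as {hash}", client.endpoint());
    Ok(())
}
```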
diff --git reth/crates/scroll/trie/Cargo.toml scroll-reth/crates/scroll/trie/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4154ad181afe452c66590e79ed870c82e7d70aa6 --- /dev/null +++ scroll-reth/crates/scroll/trie/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "reth-scroll-trie" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-trie.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-trie = { workspace = true, features = ["serde"] } + +# misc +poseidon-bn254 = { workspace = true, features = ["bn254"] } +tracing.workspace = true
diff --git reth/crates/scroll/trie/README.md scroll-reth/crates/scroll/trie/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8645b5efadd58f51d8d04146614741788a95febd --- /dev/null +++ scroll-reth/crates/scroll/trie/README.md @@ -0,0 +1,5 @@ +# scroll-trie + +Fast binary Merkle-Patricia Trie (zkTrie) state root calculator and proof generator operating on prefix-sorted key bits. + +Please see the zkTrie specification [here](assets/zktrie.md). \ No newline at end of file
diff --git reth/crates/scroll/trie/assets/arch.png scroll-reth/crates/scroll/trie/assets/arch.png new file mode 100644 index 0000000000000000000000000000000000000000..aca3f8d0c5ce4eedb5d40b8dedb39d226e57efbe Binary files /dev/null and scroll-reth/crates/scroll/trie/assets/arch.png differ
diff --git reth/crates/scroll/trie/assets/deletion.png scroll-reth/crates/scroll/trie/assets/deletion.png new file mode 100644 index 0000000000000000000000000000000000000000..6c699226e0996a30199a3ff4d2441700390cb8e2 Binary files /dev/null and scroll-reth/crates/scroll/trie/assets/deletion.png differ
diff --git reth/crates/scroll/trie/assets/insertion.png scroll-reth/crates/scroll/trie/assets/insertion.png new file mode 100644 index 0000000000000000000000000000000000000000..942338a07f2ea46e33fc899fd222b6ae6671f5e4 Binary files /dev/null and scroll-reth/crates/scroll/trie/assets/insertion.png differ
diff --git reth/crates/scroll/trie/assets/zktrie.md scroll-reth/crates/scroll/trie/assets/zktrie.md new file mode 100644 index 0000000000000000000000000000000000000000..b86e1a016ab8bae8e13f7e1b9f7fe1d29bfc7711 --- /dev/null +++ scroll-reth/crates/scroll/trie/assets/zktrie.md @@ -0,0 +1,186 @@ +# zkTrie Spec + +## 1. Tree Structure + +<figure> +<img src="https://raw.githubusercontent.com/scroll-tech/reth/refs/heads/scroll/crates/scroll/trie/assets/arch.png" alt="zkTrie Structure" style="width:80%"> +<figcaption align = "center"><b>Figure 1. zkTrie Structure</b></figcaption> +</figure> + +In essence, zkTrie is a sparse binary Merkle Patricia Trie, depicted in the above figure. +Before diving into the Sparse Binary Merkle Patricia Trie, let's briefly touch on Merkle Trees and Patricia Tries. +* **Merkle Tree**: A Merkle Tree is a tree where each leaf node represents a hash of a data block, and each non-leaf node represents the hash of its child nodes. +* **Patricia Trie**: A Patricia Trie is a type of radix tree or compressed trie used to store key-value pairs efficiently. It encodes the nodes with same prefix of the key to share the common path, where the path is determined by the value of the node key. + +As illustrated in the Figure 1, there are three types of nodes in the zkTrie. +- Parent Node (type: 0): Given the zkTrie is a binary tree, a parent node has two children. +- Leaf Node (type: 1): A leaf node holds the data of a key-value pair. +- Empty Node (type: 2): An empty node is a special type of node, indicating the sub-trie that shares the same prefix is empty. + +In zkTrie, we use Poseidon hash to compute the node hash because it's more friendly and efficient to prove it in the zk circuit. + +## 2. Tree Construction + +Given a key-value pair, we first compute a *secure key* for the corresponding leaf node by hashing the original key (i.e., account address and storage key) using the Poseidon hash function. This can make the key uniformly distributed over the key space. The node key hashing method is described in the [Node Hashing](#3-node-hashing) section below. + +We then encode the path of a new leaf node by traversing the secure key from Least Significant Bit (LSB) to the Most Significant Bit (MSB). At each step, if the bit is 0, we will traverse to the left child; otherwise, traverse to the right child. + +We limit the maximum depth of zkTrie to 248, meaning that the tree will only traverse the lower 248 bits of the key. This is because the secure key space is a finite field used by Poseidon hash that doesn't occupy the full range of power of 2. This leads to an ambiguous bit representation of the key in a finite field and thus causes a soundness issue in the zk circuit. But if we truncate the key to lower 248 bits, the key space can fully occupy the range of $2^{248}$ and won't have the ambiguity in the bit representation. + +We also apply an optimization to reduce the tree depth by contracting a subtree that has only one leaf node to a single leaf node. For example, in the Figure 1, the tree has three nodes in total, with keys `0100`, `0010`, and `1010`. Because there is only one node that has key with suffix `00`, the leaf node for key `0100` only traverses the suffix `00` and doesn't fully expand its key which would have resulted in depth of 4. + +## 3. Node Hashing + +In this section, we will describe how leaf secure key and node merkle hash are computed. We use Poseidon hash in both hashing computation, denoted as `h` in the doc below. 
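Before the individual cases below, the hashing rules of Sections 3.1-3.3 can be summarized in a short sketch; `poseidon` is only a placeholder for the Poseidon hash `h` (with `init_state = 0`), and `Hash` stands in for a field element:

```rust
/// Pseudocode summary of the node-hashing rules described below.
type Hash = [u8; 32];

/// Placeholder for the Poseidon hash `h`.
fn poseidon(_inputs: &[Hash]) -> Hash {
    unimplemented!("placeholder")
}

enum Node {
    Empty,
    Parent { left: Hash, right: Hash },
    Leaf { node_key: Hash, value_hash: Hash },
}

fn node_hash(node: &Node) -> Hash {
    match node {
        // 3.1: the hash of an empty node is 0
        Node::Empty => [0u8; 32],
        // 3.2: parentNodeHash = h(leftChildHash, rightChildHash)
        Node::Parent { left, right } => poseidon(&[*left, *right]),
        // 3.3: leafNodeHash = h(h(1, nodeKey), valueHash)
        Node::Leaf { node_key, value_hash } => {
            let mut one = [0u8; 32];
            one[31] = 1; // big-endian encoding of the constant 1
            poseidon(&[poseidon(&[one, *node_key]), *value_hash])
        }
    }
}
```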
+ +<aside> +💡 Note: We use `init_state = 0` in the Poseidon hash function for all use cases in the zkTrie. +</aside> + +### 3.1 Empty Node + +The node hash of an empty node is 0. + +### 3.2 Parent Node + +The parent node hash is computed as follows + +```go +parentNodeHash = h(leftChildHash, rightChildHash) +``` + +### 3.3 Leaf Node + +The node hash of a leaf node is computed as follows + +```go +leafNodeHash = h(h(1, nodeKey), valueHash) +``` + +The leaf node can hold two types of values: Ethereum accounts and storage key-value pairs. Next, we will describe how the node key and value hash are computed for each leaf node type. + +#### Ethereum Account Leaf Node +For an Ethereum Account Leaf Node, it consists of an Ethereum address and a state account struct. The secure key is derived from the Ethereum address. +``` +address[0:20] (20 bytes in big-endian) +valHi = address[0:16] +valLo = address[16:20] * 2^96 (padding 12 bytes of 0 at the end) +nodeKey = h(valHi, valLo) +``` + +A state account struct in the Scroll consists of the following fields (`Fr` indicates the finite field used in Poseidon hash and is a 254-bit value) + +- `Nonce`: u64 +- `Balance`: u256, but treated as Fr +- `StorageRoot`: Fr +- `KeccakCodeHash`: u256 +- `PoseidonCodeHash`: Fr +- `CodeSize`: u64 + +Before computing the value hash, the state account is first marshaled into a list of `u256` values. The marshaling scheme is + +``` +(The following scheme assumes the big-endian encoding) +[0:32] (bytes in big-endian) + [0:16] Reserved with all 0 + [16:24] CodeSize, uint64 in big-endian + [24:32] Nonce, uint64 in big-endian +[32:64] Balance +[64:96] StorageRoot +[96:128] KeccakCodeHash +[128:160] PoseidonCodehash +(total 160 bytes) +``` + +The marshal function also returns a `flag` value along with a vector of `u256` values. The `flag` is a bitmap that indicates whether a `u256` value CANNOT be treated as a field element (Fr). The `flag` value for state account is 8, shown below. + +``` ++--------------------+---------+------+----------+----------+ +| 0 | 1 | 2 | 3 | 4 | (index) ++--------------------+---------+------+----------+----------+ +| nonce||codesize||0 | balance | root | keccak | poseidon | (u256) ++--------------------+---------+------+----------+----------+ +| 0 | 0 | 0 | 1 | 0 | (flag bits) ++--------------------+---------+------+----------+----------+ +(LSB) (MSB) +``` + +The value hash is computed in two steps: +1. Convert the value that cannot be represented as a field element of the Poseidon hash to the field element. +2. Combine field elements in a binary tree structure till the tree root is treated as the value hash. + +In the first step, when the bit in the `flag` is 1 indicating the `u256` value that cannot be treated as a field element, we split the value into a high-128bit value and a low-128bit value, and then pass them to a Poseidon hash to derive a field element value, `h(valueHi, valueLo)`. + +Based on the definition, the value hash of the state account is computed as follows. + +``` +valueHash = +h( + h( + h(nonce||codesize||0, balance), + h( + storageRoot, + h(keccakCodeHash[0:16], keccakCodeHash[16:32]), // convert Keccak codehash to a field element + ), + ), + poseidonCodeHash, +) +``` + +#### Storage Leaf Node + +For a Storage Leaf Node, it is a key-value pair, which both are a `u256` value. The secure key of this leaf node is derived from the storage key. 
+ +``` +storageKey[0:32] (32 bytes in big-endian) +valHi = storageKey[0:16] +valLo = storageKey[16:32] +nodeKey = h(valHi, valLo) +``` + +The storage value is a `u256` value. The `flag` for the storage value is 1, showed below. + +``` ++--------------+ +| 0 | (index) ++--------------+ +| storageValue | (u256) ++--------------+ +| 1 | (flag bits) ++--------------+ +``` + +The value hash is computed as follows + +```go +valueHash = h(storageValue[0:16], storageValue[16:32]) +``` + +## 4. Tree Operations + +### 4.1 Insertion + +<figure> +<img src="https://raw.githubusercontent.com/scroll-tech/reth/refs/heads/scroll/crates/scroll/trie/assets/insertion.png" alt="zkTrie Structure" style="width:80%"> +<figcaption align = "center"><b>Figure 2. Insert a new leaf node to zkTrie</b></figcaption> +</figure> + +When we insert a new leaf node to the existing zkTrie, there could be two cases illustrated in the Figure 2. + +1. When traversing the path of the node key, it reaches an empty node (Figure 2(b)). In this case, we just need to replace this empty node by this leaf node and backtrace the path to update the merkle hash of parent nodes till the root. +2. When traversing the path of the node key, it reaches another leaf node `b` (Figure 2(c)). In this case, we need to push down the existing leaf node `b` until the next bit in the node keys of two leaf nodes differs. At each push-down step, we need to insert an empty sibling node when necessary. When we reach the level where the bits differ, we then place two leaf nodes `b` and `c` as the left child and the right child depending on their bits. At last, we backtrace the path and update the merkle hash of all parent nodes. + +### 4.2 Deletion + +<figure> +<img src="https://raw.githubusercontent.com/scroll-tech/reth/refs/heads/scroll/crates/scroll/trie/assets/deletion.png" alt="zkTrie Structure" style="width:80%"> +<figcaption align = "center"><b>Figure 3. Delete a leaf node from the zkTrie</b></figcaption> +</figure> + + +The deletion of a leaf node is similar to the insertion. There are two cases illustrated in the Figure 3. + +1. The sibling node of to-be-deleted leaf node is a parent node (Figure 3(b)). In this case, we can just replace the node `a` by an empty node and update the node hash of its ancestors till the root node. +2. The node of to-be-deleted leaf node is a leaf node (Figure 3(c)). Similarly, we first replace the leaf node by an empty node and start to contract its sibling node upwards until its sibling node is not an empty node. For example, in Figure 3(c), we first replace the leaf node `b` by an empty node. During the contraction, since the sibling of node `c` now becomes an empty node, we move node `c` one level upward to replace its parent node. The new sibling of node `c`, node `e`, is still an empty node. So again we move node `c` upward. Now that the sibling of node `c` is node `a`, the deletion process is finished. + +Note that the sibling of a leaf node in a valid zkTrie cannot be an empty node. Otherwise, we should always prune the subtree and move the leaf node upwards.
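The account value hash defined above combines the five marshaled words in a fixed binary shape, first compressing the Keccak code hash, the only word whose flag bit is set, i.e. the only one that cannot be treated as a field element. A sketch of that combination, with `h` again a placeholder for the Poseidon hash:

```rust
/// Sketch of the state-account value hash described above.
type Fe = [u8; 32];

/// Placeholder for the Poseidon hash `h`.
fn h(_inputs: &[Fe]) -> Fe {
    unimplemented!("placeholder")
}

/// The five marshaled words of a state account, already reduced to field elements,
/// except the Keccak code hash which is carried as two 128-bit halves.
struct AccountWords {
    nonce_codesize_0: Fe,    // [0:16] = 0, [16:24] = CodeSize, [24:32] = Nonce
    balance: Fe,
    storage_root: Fe,
    keccak_code_hash_hi: Fe, // keccakCodeHash[0:16]
    keccak_code_hash_lo: Fe, // keccakCodeHash[16:32]
    poseidon_code_hash: Fe,
}

fn account_value_hash(a: &AccountWords) -> Fe {
    // The Keccak code hash cannot be treated as a field element, so compress it first.
    let keccak_fe = h(&[a.keccak_code_hash_hi, a.keccak_code_hash_lo]);
    h(&[
        h(&[
            h(&[a.nonce_codesize_0, a.balance]),
            h(&[a.storage_root, keccak_fe]),
        ]),
        a.poseidon_code_hash,
    ])
}
```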
diff --git reth/crates/scroll/trie/src/branch.rs scroll-reth/crates/scroll/trie/src/branch.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6b433913d53e71f2db801ae4c89b3a6f975041c --- /dev/null +++ scroll-reth/crates/scroll/trie/src/branch.rs @@ -0,0 +1,155 @@ +use super::{ + BRANCH_NODE_LBRB_DOMAIN, BRANCH_NODE_LBRT_DOMAIN, BRANCH_NODE_LTRB_DOMAIN, + BRANCH_NODE_LTRT_DOMAIN, +}; +use alloy_primitives::{hex, B256}; +use alloy_trie::TrieMask; +use core::{fmt, ops::Range, slice::Iter}; +use poseidon_bn254::{hash_with_domain, Fr, PrimeField}; + +#[allow(unused_imports)] +use alloc::vec::Vec; + +/// The range of valid child indexes. +pub(crate) const CHILD_INDEX_RANGE: Range<u8> = 0..2; + +/// A trie mask to extract the two child indexes from a branch node. +pub(crate) const CHILD_INDEX_MASK: TrieMask = TrieMask::new(0b11); + +/// A reference to branch node and its state mask. +/// NOTE: The stack may contain more items that specified in the state mask. +#[derive(Clone)] +pub(crate) struct BranchNodeRef<'a> { + /// Reference to the collection of hash nodes. + /// NOTE: The referenced stack might have more items than the number of children + /// for this node. We should only ever access items starting from + /// [`BranchNodeRef::first_child_index`]. + pub stack: &'a [B256], + /// Reference to bitmask indicating the presence of children at + /// the respective nibble positions. + pub state_mask: TrieMask, +} + +impl fmt::Debug for BranchNodeRef<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BranchNodeRef") + .field("stack", &self.stack.iter().map(hex::encode).collect::<Vec<_>>()) + .field("state_mask", &self.state_mask) + .field("first_child_index", &self.first_child_index()) + .finish() + } +} + +impl<'a> BranchNodeRef<'a> { + /// Create a new branch node from the stack of nodes. + #[inline] + pub(crate) const fn new(stack: &'a [B256], state_mask: TrieMask) -> Self { + Self { stack, state_mask } + } + + /// Returns the stack index of the first child for this node. + /// + /// # Panics + /// + /// If the stack length is less than number of children specified in state mask. + /// Means that the node is in inconsistent state. + #[inline] + pub(crate) fn first_child_index(&self) -> usize { + self.stack + .len() + .checked_sub((self.state_mask & CHILD_INDEX_MASK).count_ones() as usize) + .expect("branch node stack is in inconsistent state") + } + + #[inline] + fn children(&self) -> impl Iterator<Item = (u8, Option<&B256>)> + '_ { + BranchChildrenIter::new(self) + } + + /// Given the hash mask of children, return an iterator over stack items + /// that match the mask. 
+ #[inline] + pub(crate) fn child_hashes(&self, hash_mask: TrieMask) -> impl Iterator<Item = B256> + '_ { + self.children() + .filter_map(|(i, c)| c.map(|c| (i, c))) + .filter(move |(index, _)| hash_mask.is_bit_set(*index)) + .map(|(_, child)| B256::from_slice(&child[..])) + } + + pub(crate) fn hash(&self) -> B256 { + let mut children_iter = self.children(); + + let left_child = children_iter + .next() + .map(|(_, c)| *c.unwrap_or_default()) + .expect("branch node has two children"); + let left_child = + Fr::from_repr_vartime(left_child.0).expect("left child is a valid field element"); + let right_child = children_iter + .next() + .map(|(_, c)| *c.unwrap_or_default()) + .expect("branch node has two children"); + let right_child = + Fr::from_repr_vartime(right_child.0).expect("right child is a valid field element"); + + hash_with_domain(&[left_child, right_child], self.hashing_domain()).to_repr().into() + } + + fn hashing_domain(&self) -> Fr { + match *self.state_mask { + 0b1011 => BRANCH_NODE_LBRT_DOMAIN, + 0b1111 => BRANCH_NODE_LTRT_DOMAIN, + 0b0111 => BRANCH_NODE_LTRB_DOMAIN, + 0b0011 => BRANCH_NODE_LBRB_DOMAIN, + _ => unreachable!("invalid branch node state mask"), + } + } +} + +/// Iterator over branch node children. +#[derive(Debug)] +struct BranchChildrenIter<'a> { + range: Range<u8>, + state_mask: TrieMask, + stack_iter: Iter<'a, B256>, +} + +impl<'a> BranchChildrenIter<'a> { + /// Create new iterator over branch node children. + fn new(node: &BranchNodeRef<'a>) -> Self { + Self { + range: CHILD_INDEX_RANGE, + state_mask: node.state_mask, + stack_iter: node.stack[node.first_child_index()..].iter(), + } + } +} + +impl<'a> Iterator for BranchChildrenIter<'a> { + type Item = (u8, Option<&'a B256>); + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + let i = self.range.next()?; + let value = self + .state_mask + .is_bit_set(i) + .then(|| unsafe { self.stack_iter.next().unwrap_unchecked() }); + Some((i, value)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let len = self.len(); + (len, Some(len)) + } +} + +impl core::iter::FusedIterator for BranchChildrenIter<'_> {} + +impl ExactSizeIterator for BranchChildrenIter<'_> { + #[inline] + fn len(&self) -> usize { + self.range.len() + } +}
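Judging from the hash builder in this crate (which ORs `0b100 << nibble` into the state mask for leaf children), the four-bit state mask matched in `hashing_domain` above appears to use bits 0-1 for child presence and bits 2-3 to mark the left/right child as terminal rather than a branch node. A small sketch of that reading, offered as an interpretation rather than a guaranteed invariant:

```rust
/// Illustrative decoding of the 4-bit branch state mask used above:
/// bits 0-1 flag the left/right child as present, bits 2-3 flag the
/// left/right child as terminal. The four combinations select the four
/// branch hashing domains defined in `lib.rs`.
fn domain_label(state_mask: u8) -> &'static str {
    match state_mask & 0b1111 {
        0b0011 => "LBRB", // left branch,   right branch
        0b0111 => "LTRB", // left terminal, right branch
        0b1011 => "LBRT", // left branch,   right terminal
        0b1111 => "LTRT", // left terminal, right terminal
        _ => "invalid branch state mask",
    }
}
```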
diff --git reth/crates/scroll/trie/src/constants.rs scroll-reth/crates/scroll/trie/src/constants.rs new file mode 100644 index 0000000000000000000000000000000000000000..67bd414f835834ecca93f29497a396384c87f84e --- /dev/null +++ scroll-reth/crates/scroll/trie/src/constants.rs @@ -0,0 +1,4 @@ +use alloy_primitives::B256; + +/// The root hash of an empty binary Merkle Patricia trie. +pub const EMPTY_ROOT_HASH: B256 = B256::ZERO;
diff --git reth/crates/scroll/trie/src/hash_builder.rs scroll-reth/crates/scroll/trie/src/hash_builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..60bdf14087f523e9857e82d884fb034599ceb6d8 --- /dev/null +++ scroll-reth/crates/scroll/trie/src/hash_builder.rs @@ -0,0 +1,568 @@ +use crate::{ + branch::{BranchNodeRef, CHILD_INDEX_MASK}, + constants::EMPTY_ROOT_HASH, + leaf::HashLeaf, + sub_tree::SubTreeRef, +}; +use alloy_primitives::{map::HashMap, B256}; +use alloy_trie::{ + hash_builder::{HashBuilderValue, HashBuilderValueRef}, + nodes::LeafNodeRef, + proof::{ProofNodes, ProofRetainer}, + BranchNodeCompact, Nibbles, TrieMask, +}; +use core::cmp; +use tracing::trace; + +#[derive(Debug, Default)] +#[allow(missing_docs)] +pub struct HashBuilder { + pub key: Nibbles, + pub value: HashBuilderValue, + pub stack: Vec<B256>, + + // TODO(scroll): Introduce terminator / leaf masks + pub state_masks: Vec<TrieMask>, + pub tree_masks: Vec<TrieMask>, + pub hash_masks: Vec<TrieMask>, + + pub stored_in_database: bool, + + pub updated_branch_nodes: Option<HashMap<Nibbles, BranchNodeCompact>>, + pub proof_retainer: Option<ProofRetainer>, +} + +impl HashBuilder { + /// Enables the Hash Builder to store updated branch nodes. + /// + /// Call [`HashBuilder::split`] to get the updates to branch nodes. + pub fn with_updates(mut self, retain_updates: bool) -> Self { + self.set_updates(retain_updates); + self + } + + /// Enable specified proof retainer. + pub fn with_proof_retainer(mut self, retainer: ProofRetainer) -> Self { + self.proof_retainer = Some(retainer); + self + } + + /// Enables the Hash Builder to store updated branch nodes. + /// + /// Call [`HashBuilder::split`] to get the updates to branch nodes. + pub fn set_updates(&mut self, retain_updates: bool) { + if retain_updates { + self.updated_branch_nodes = Some(HashMap::default()); + } + } + + /// Splits the [`HashBuilder`] into a [`HashBuilder`] and hash builder updates. + pub fn split(mut self) -> (Self, HashMap<Nibbles, BranchNodeCompact>) { + let updates = self.updated_branch_nodes.take(); + (self, updates.unwrap_or_default()) + } + + /// Take and return retained proof nodes. + pub fn take_proof_nodes(&mut self) -> ProofNodes { + self.proof_retainer.take().map(ProofRetainer::into_proof_nodes).unwrap_or_default() + } + + /// The number of total updates accrued. + /// Returns `0` if [`Self::with_updates`] was not called. + pub fn updates_len(&self) -> usize { + self.updated_branch_nodes.as_ref().map(|u| u.len()).unwrap_or(0) + } + + /// Print the current stack of the Hash Builder. + pub fn print_stack(&self) { + println!("============ STACK ==============="); + for item in &self.stack { + println!("{}", alloy_primitives::hex::encode(item)); + } + println!("============ END STACK ==============="); + } + + /// Adds a new leaf element and its value to the trie hash builder. + pub fn add_leaf(&mut self, key: Nibbles, value: &[u8]) { + assert!(key > self.key, "add_leaf key {:?} self.key {:?}", key, self.key); + if !self.key.is_empty() { + self.update(&key); + } + self.set_key_value(key, HashBuilderValueRef::Bytes(value)); + } + + /// Adds a new branch element and its hash to the trie hash builder. 
+ pub fn add_branch(&mut self, key: Nibbles, value: B256, stored_in_database: bool) { + assert!( + key > self.key || (self.key.is_empty() && key.is_empty()), + "add_branch key {:?} self.key {:?}", + key, + self.key + ); + if !self.key.is_empty() { + self.update(&key); + } else if key.is_empty() { + self.stack.push(value); + } + self.set_key_value(key, HashBuilderValueRef::Hash(&value)); + self.stored_in_database = stored_in_database; + } + + /// Returns the current root hash of the trie builder. + pub fn root(&mut self) -> B256 { + // Clears the internal state + if !self.key.is_empty() { + self.update(&Nibbles::default()); + self.key.clear(); + self.value.clear(); + } + let root = self.current_root(); + if root == EMPTY_ROOT_HASH { + if let Some(proof_retainer) = self.proof_retainer.as_mut() { + proof_retainer.retain(&Nibbles::default(), &[]) + } + } + root + } + + #[inline] + fn set_key_value(&mut self, key: Nibbles, value: HashBuilderValueRef<'_>) { + self.log_key_value("old value"); + self.key = key; + self.value.set_from_ref(value); + self.log_key_value("new value"); + } + + fn log_key_value(&self, msg: &str) { + trace!(target: "trie::hash_builder", + key = ?self.key, + value = ?self.value, + "{msg}", + ); + } + + fn current_root(&self) -> B256 { + if let Some(node_ref) = self.stack.last() { + let mut root = *node_ref; + root.reverse(); + root + } else { + EMPTY_ROOT_HASH + } + } + + /// Given a new element, it appends it to the stack and proceeds to loop through the stack state + /// and convert the nodes it can into branch / extension nodes and hash them. This ensures + /// that the top of the stack always contains the merkle root corresponding to the trie + /// built so far. + fn update(&mut self, succeeding: &Nibbles) { + let mut build_extensions = false; + // current / self.key is always the latest added element in the trie + let mut current = self.key.clone(); + debug_assert!(!current.is_empty()); + + trace!(target: "trie::hash_builder", ?current, ?succeeding, "updating merkle tree"); + + let mut i = 0usize; + loop { + let _span = tracing::trace_span!(target: "trie::hash_builder", "loop", i, ?current, build_extensions).entered(); + + let preceding_exists = !self.state_masks.is_empty(); + let preceding_len = self.state_masks.len().saturating_sub(1); + + let common_prefix_len = succeeding.common_prefix_length(current.as_slice()); + let len = cmp::max(preceding_len, common_prefix_len); + assert!(len < current.len(), "len {} current.len {}", len, current.len()); + + trace!( + target: "trie::hash_builder", + ?len, + ?common_prefix_len, + ?preceding_len, + preceding_exists, + "prefix lengths after comparing keys" + ); + + // Adjust the state masks for branch calculation + let extra_digit = current[len]; + if self.state_masks.len() <= len { + let new_len = len + 1; + trace!(target: "trie::hash_builder", new_len, old_len = self.state_masks.len(), "scaling state masks to fit"); + self.state_masks.resize(new_len, TrieMask::default()); + } + self.state_masks[len] |= TrieMask::from_nibble(extra_digit); + trace!( + target: "trie::hash_builder", + ?extra_digit, + groups = ?self.state_masks, + ); + + // Adjust the tree masks for exporting to the DB + if self.tree_masks.len() < current.len() { + self.resize_masks(current.len()); + } + + let mut len_from = len; + if !succeeding.is_empty() || preceding_exists { + len_from += 1; + } + trace!(target: "trie::hash_builder", "skipping {len_from} nibbles"); + + // The key without the common prefix + let short_node_key = current.slice(len_from..); + 
trace!(target: "trie::hash_builder", ?short_node_key); + + // Concatenate the 2 nodes together + if !build_extensions { + match self.value.as_ref() { + HashBuilderValueRef::Bytes(leaf_value) => { + // TODO(scroll): Replace with terminator masks + // Set the terminator mask for the leaf node + self.state_masks[len] |= TrieMask::new(0b100 << extra_digit); + let leaf_node = LeafNodeRef::new(&current, leaf_value); + let leaf_hash = leaf_node.hash_leaf(); + trace!( + target: "trie::hash_builder", + ?leaf_node, + ?leaf_hash, + "pushing leaf node", + ); + self.stack.push(leaf_hash); + // self.retain_proof_from_stack(&current.slice(..len_from)); + } + HashBuilderValueRef::Hash(hash) => { + trace!(target: "trie::hash_builder", ?hash, "pushing branch node hash"); + self.stack.push(*hash); + + if self.stored_in_database { + self.tree_masks[current.len() - 1] |= TrieMask::from_nibble( + current + .last() + .expect("must have at least a single bit in the current key"), + ); + } + self.hash_masks[current.len() - 1] |= TrieMask::from_nibble( + current + .last() + .expect("must have at least a single bit in the current key"), + ); + + build_extensions = true; + } + } + } + + if build_extensions && !short_node_key.is_empty() { + self.update_masks(&current, len_from); + let stack_last = self.stack.pop().expect("there should be at least one stack item"); + let sub_tree = SubTreeRef::new(&short_node_key, &stack_last); + let sub_tree_root = sub_tree.root(); + + trace!( + target: "trie::hash_builder", + ?short_node_key, + ?sub_tree_root, + "pushing subtree root", + ); + self.stack.push(sub_tree_root); + // self.retain_proof_from_stack(&current.slice(..len_from)); + self.resize_masks(len_from); + } + + if preceding_len <= common_prefix_len && !succeeding.is_empty() { + trace!(target: "trie::hash_builder", "no common prefix to create branch nodes from, returning"); + return; + } + + // Insert branch nodes in the stack + if !succeeding.is_empty() || preceding_exists { + // Pushes the corresponding branch node to the stack + let children = self.push_branch_node(&current, len); + // Need to store the branch node in an efficient format outside of the hash builder + self.store_branch_node(&current, len, children); + } + + self.state_masks.resize(len, TrieMask::default()); + self.resize_masks(len); + + if preceding_len == 0 { + trace!(target: "trie::hash_builder", "0 or 1 state masks means we have no more elements to process"); + return; + } + + current.truncate(preceding_len); + trace!(target: "trie::hash_builder", ?current, "truncated nibbles to {} bytes", preceding_len); + + trace!(target: "trie::hash_builder", groups = ?self.state_masks, "popping empty state masks"); + while self.state_masks.last() == Some(&TrieMask::default()) { + self.state_masks.pop(); + } + + build_extensions = true; + + i += 1; + } + } + + /// Given the size of the longest common prefix, it proceeds to create a branch node + /// from the state mask and existing stack state, and store its RLP to the top of the stack, + /// after popping all the relevant elements from the stack. + /// + /// Returns the hashes of the children of the branch node, only if `updated_branch_nodes` is + /// enabled. + fn push_branch_node(&mut self, _current: &Nibbles, len: usize) -> Vec<B256> { + let state_mask = self.state_masks[len]; + let hash_mask = self.hash_masks[len]; + let branch_node = BranchNodeRef::new(&self.stack, state_mask); + // Avoid calculating this value if it's not needed. 
+ let children = if self.updated_branch_nodes.is_some() { + branch_node.child_hashes(hash_mask).collect() + } else { + vec![] + }; + + let branch_hash = branch_node.hash(); + + // TODO: enable proof retention + // self.retain_proof_from_stack(&current.slice(..len)); + + // Clears the stack from the branch node elements + let first_child_idx = branch_node.first_child_index(); + trace!( + target: "trie::hash_builder", + new_len = first_child_idx, + old_len = self.stack.len(), + "resizing stack to prepare branch node" + ); + self.stack.resize_with(first_child_idx, Default::default); + + trace!(target: "trie::hash_builder", ?branch_hash, "pushing branch node with {state_mask:?} mask + from stack"); + + self.stack.push(branch_hash); + children + } + + /// Given the current nibble prefix and the highest common prefix length, proceeds + /// to update the masks for the next level and store the branch node and the + /// masks in the database. We will use that when consuming the intermediate nodes + /// from the database to efficiently build the trie. + fn store_branch_node(&mut self, current: &Nibbles, len: usize, children: Vec<B256>) { + trace!(target: "trie::hash_builder", ?current, ?len, ?children, "store branch node"); + if len > 0 { + let parent_index = len - 1; + self.hash_masks[parent_index] |= TrieMask::from_nibble(current[parent_index]); + } + + let store_in_db_trie = !self.tree_masks[len].is_empty() || !self.hash_masks[len].is_empty(); + if store_in_db_trie { + if len > 0 { + let parent_index = len - 1; + self.tree_masks[parent_index] |= TrieMask::from_nibble(current[parent_index]); + } + + if self.updated_branch_nodes.is_some() { + let common_prefix = current.slice(..len); + let node = BranchNodeCompact::new( + self.state_masks[len] & CHILD_INDEX_MASK, + self.tree_masks[len], + self.hash_masks[len], + children, + (len == 0).then(|| self.current_root()), + ); + trace!(target: "trie::hash_builder", ?node, "storing updated intermediate node"); + self.updated_branch_nodes + .as_mut() + .expect("updates_branch_nodes is some") + .insert(common_prefix, node); + } + } + } + + // TODO(scroll): Enable proof retention + // fn retain_proof_from_stack(&mut self, prefix: &Nibbles) { + // if let Some(proof_retainer) = self.proof_retainer.as_mut() { + // proof_retainer.retain( + // prefix, + // self.stack.last().expect("there should be at least one stack item").as_ref(), + // ); + // } + // } + + fn update_masks(&mut self, current: &Nibbles, len_from: usize) { + if len_from > 0 { + let flag = TrieMask::from_nibble(current[len_from - 1]); + + self.hash_masks[len_from - 1] &= !flag; + + if !self.tree_masks[current.len() - 1].is_empty() { + self.tree_masks[len_from - 1] |= flag; + } + } + } + + fn resize_masks(&mut self, new_len: usize) { + trace!( + target: "trie::hash_builder", + new_len, + old_tree_mask_len = self.tree_masks.len(), + old_hash_mask_len = self.hash_masks.len(), + "resizing tree/hash masks" + ); + self.tree_masks.resize(new_len, TrieMask::default()); + self.hash_masks.resize(new_len, TrieMask::default()); + } +} + +// TODO(scroll): Introduce generic for the HashBuilder. 
+impl From<reth_trie::HashBuilder> for HashBuilder { + fn from(hash_builder: reth_trie::HashBuilder) -> Self { + Self { + key: hash_builder.key, + value: hash_builder.value, + stack: hash_builder + .stack + .into_iter() + .map(|x| x.as_slice().try_into().expect("RlpNode contains 32 byte hashes")) + .collect(), + state_masks: hash_builder.groups, + tree_masks: hash_builder.tree_masks, + hash_masks: hash_builder.hash_masks, + stored_in_database: hash_builder.stored_in_database, + updated_branch_nodes: hash_builder.updated_branch_nodes, + proof_retainer: hash_builder.proof_retainer, + } + } +} + +impl From<HashBuilder> for reth_trie::HashBuilder { + fn from(value: HashBuilder) -> Self { + Self { + key: value.key, + value: value.value, + stack: value + .stack + .into_iter() + .map(|x| { + reth_trie::RlpNode::from_raw(&x.0).expect("32 byte hash can be cast to RlpNode") + }) + .collect(), + groups: value.state_masks, + tree_masks: value.tree_masks, + hash_masks: value.hash_masks, + stored_in_database: value.stored_in_database, + updated_branch_nodes: value.updated_branch_nodes, + proof_retainer: value.proof_retainer, + rlp_buf: Default::default(), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::key::AsBytes; + use alloc::collections::BTreeMap; + use poseidon_bn254::{hash_with_domain, Fr, PrimeField}; + + #[test] + fn test_basic_trie() { + // Test a basic trie consisting of three key value pairs: + // (0, 0, 0, 0, ... , 0) + // (0, 0, 0, 1, ... , 0) + // (0, 0, 1, 0, ... , 0) + // (1, 1, 1, 0, ... , 0) + // (1, 1, 1, 1, ... , 0) + // The branch associated with key 0xF will be collapsed into a single leaf. + + let leaf_1_key = Nibbles::from_nibbles_unchecked([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]); + let leaf_2_key = Nibbles::from_nibbles_unchecked([ + 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]); + let leaf_3_key = Nibbles::from_nibbles_unchecked([ + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]); + let leaf_4_key = Nibbles::from_nibbles_unchecked([ + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]); + let leaf_5_key = Nibbles::from_nibbles_unchecked([ + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]); + let leaf_keys = [ + leaf_1_key.clone(), + leaf_2_key.clone(), + leaf_3_key.clone(), + leaf_4_key.clone(), + leaf_5_key.clone(), + ]; + + let leaf_values = leaf_keys + .into_iter() + .enumerate() + .map(|(i, key)| { + let mut leaf_value = [0u8; 32]; + leaf_value[0] = i as u8 + 1; + (key, leaf_value) + }) + .collect::<BTreeMap<_, _>>(); + + let leaf_hashes: BTreeMap<_, _> = leaf_values + .iter() + .map(|(key, value)| { + let key_fr = + Fr::from_repr_vartime(key.as_bytes()).expect("key is valid field element"); + let value = Fr::from_repr_vartime(*value).expect("value is a valid field element"); + let hash = hash_with_domain(&[key_fr, value], crate::LEAF_NODE_DOMAIN); + (key.clone(), hash) + }) + .collect(); + + let mut hb = HashBuilder::default().with_updates(true); + + for (key, val) in &leaf_values { + hb.add_leaf(key.clone(), val); + } + + let root = hb.root(); + + // node_000 -> hash(leaf_1, leaf_2) LTRT + // node_00 -> hash(node_000, leaf_3) LBRT + // node_0 -> hash(node_00, EMPTY) LBRT + // node_111 -> hash(leaf_4, leaf_5) LTRT + // node_11 -> hash(EMPTY, node_111) LTRB + // node_1 -> 
hash(EMPTY, node_11) LTRB + // root -> hash(node_0, node_1) LBRB + + let expected: B256 = { + let node_000 = hash_with_domain( + &[*leaf_hashes.get(&leaf_1_key).unwrap(), *leaf_hashes.get(&leaf_2_key).unwrap()], + crate::BRANCH_NODE_LTRT_DOMAIN, + ); + let node_00 = hash_with_domain( + &[node_000, *leaf_hashes.get(&leaf_3_key).unwrap()], + crate::BRANCH_NODE_LBRT_DOMAIN, + ); + let node_0 = hash_with_domain(&[node_00, Fr::zero()], crate::BRANCH_NODE_LBRT_DOMAIN); + let node_111 = hash_with_domain( + &[*leaf_hashes.get(&leaf_4_key).unwrap(), *leaf_hashes.get(&leaf_5_key).unwrap()], + crate::BRANCH_NODE_LTRT_DOMAIN, + ); + let node_11 = hash_with_domain(&[Fr::zero(), node_111], crate::BRANCH_NODE_LTRB_DOMAIN); + let node_1 = hash_with_domain(&[Fr::zero(), node_11], crate::BRANCH_NODE_LTRB_DOMAIN); + + let mut root = + hash_with_domain(&[node_0, node_1], crate::BRANCH_NODE_LBRB_DOMAIN).to_repr(); + root.reverse(); + root.into() + }; + + assert_eq!(expected, root); + } +}
diff --git reth/crates/scroll/trie/src/key.rs scroll-reth/crates/scroll/trie/src/key.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a58bd9f9ce06ceb31532ec32ace2c10471b079f --- /dev/null +++ scroll-reth/crates/scroll/trie/src/key.rs @@ -0,0 +1,21 @@ +use reth_trie::Nibbles; + +/// A type that can return its bytes representation encoded as a little-endian on 32 bytes. +pub(crate) trait AsBytes { + /// Returns the type as its canonical little-endian representation on 32 bytes. + fn as_bytes(&self) -> [u8; 32]; +} + +impl AsBytes for Nibbles { + fn as_bytes(&self) -> [u8; 32] { + // This is strange we are now representing the leaf key using big endian?? + let mut result = [0u8; 32]; + for (byte_index, bytes) in self.as_slice().chunks(8).enumerate() { + for (bit_index, byte) in bytes.iter().enumerate() { + result[byte_index] |= byte << bit_index; + } + } + + result + } +}
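For reference, the packing in `AsBytes::as_bytes` above places the i-th path element (a single bit) into byte `i / 8` at bit position `i % 8`, so the first bit of the path ends up as the least significant bit of byte 0. A standalone sketch of the same loop, with a hypothetical function name that is not part of the crate:

```rust
/// Packs a bit path (each element 0 or 1) into a 32-byte array, eight bits
/// per byte, least significant bit first within each byte, mirroring the
/// loop in `AsBytes::as_bytes`.
fn pack_bits(bits: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (byte_index, chunk) in bits.chunks(8).enumerate() {
        for (bit_index, bit) in chunk.iter().enumerate() {
            out[byte_index] |= *bit << bit_index;
        }
    }
    out
}

fn main() {
    // The 3-bit path [1, 0, 1] lands in byte 0 as 0b0000_0101 = 5.
    assert_eq!(pack_bits(&[1, 0, 1])[0], 5);
}
```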
diff --git reth/crates/scroll/trie/src/leaf.rs scroll-reth/crates/scroll/trie/src/leaf.rs new file mode 100644 index 0000000000000000000000000000000000000000..79216d8f562dfdf8e2bbe231e0454858e0459ae2 --- /dev/null +++ scroll-reth/crates/scroll/trie/src/leaf.rs @@ -0,0 +1,23 @@ +use super::LEAF_NODE_DOMAIN; +use crate::key::AsBytes; +use alloy_primitives::B256; +use alloy_trie::nodes::LeafNodeRef; +use poseidon_bn254::{hash_with_domain, Fr, PrimeField}; + +/// A trait used to hash the leaf node. +pub(crate) trait HashLeaf { + /// Hash the leaf node. + fn hash_leaf(&self) -> B256; +} + +impl HashLeaf for LeafNodeRef<'_> { + fn hash_leaf(&self) -> B256 { + let leaf_key = + Fr::from_repr_vartime(self.key.as_bytes()).expect("leaf key is a valid field element"); + let leaf_value = Fr::from_repr_vartime( + <[u8; 32]>::try_from(self.value).expect("leaf value is 32 bytes"), + ) + .expect("leaf value is a valid field element"); + hash_with_domain(&[leaf_key, leaf_value], LEAF_NODE_DOMAIN).to_repr().into() + } +}
diff --git reth/crates/scroll/trie/src/lib.rs scroll-reth/crates/scroll/trie/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b1a5fd5aa93d6ad8bc735ff4a6d4bb1fe17679e --- /dev/null +++ scroll-reth/crates/scroll/trie/src/lib.rs @@ -0,0 +1,37 @@ +//! Fast binary Merkle-Patricia Trie (zktrie) state root calculator and proof generator for +//! prefix-sorted bits. + +#![cfg_attr(not(doctest), doc = include_str!("../assets/zktrie.md"))] + +#[macro_use] +#[allow(unused_imports)] +extern crate alloc; + +mod branch; + +mod constants; +pub use constants::EMPTY_ROOT_HASH; + +mod key; +mod leaf; +mod sub_tree; + +mod hash_builder; +pub use hash_builder::HashBuilder; + +use poseidon_bn254::Fr; + +/// The hashing domain for leaf nodes. +pub const LEAF_NODE_DOMAIN: Fr = Fr::from_raw([4, 0, 0, 0]); + +/// The hashing domain for a branch node with two terminal children. +pub const BRANCH_NODE_LTRT_DOMAIN: Fr = Fr::from_raw([6, 0, 0, 0]); + +/// The hashing domain for a branch node with a left terminal child and a right branch child. +pub const BRANCH_NODE_LTRB_DOMAIN: Fr = Fr::from_raw([7, 0, 0, 0]); + +/// The hashing domain for a branch node with a left branch child and a right terminal child. +pub const BRANCH_NODE_LBRT_DOMAIN: Fr = Fr::from_raw([8, 0, 0, 0]); + +/// The hashing domain for a branch node with two branch children. +pub const BRANCH_NODE_LBRB_DOMAIN: Fr = Fr::from_raw([9, 0, 0, 0]);
diff --git reth/crates/scroll/trie/src/sub_tree.rs scroll-reth/crates/scroll/trie/src/sub_tree.rs new file mode 100644 index 0000000000000000000000000000000000000000..31edad05c4166dcab958f28450206e81b61ab13f --- /dev/null +++ scroll-reth/crates/scroll/trie/src/sub_tree.rs @@ -0,0 +1,44 @@ +use super::{BRANCH_NODE_LBRT_DOMAIN, BRANCH_NODE_LTRB_DOMAIN}; +use alloy_primitives::{hex, B256}; +use alloy_trie::Nibbles; +use core::fmt; +use poseidon_bn254::{hash_with_domain, Fr, PrimeField}; + +/// [`SubTreeRef`] is a structure that allows for calculation of the root of a sparse binary Merkle +/// tree consisting of a single leaf node. +pub(crate) struct SubTreeRef<'a> { + /// The key to the child node. + pub key: &'a Nibbles, + /// A pointer to the child node. + pub child: &'a B256, +} + +impl<'a> SubTreeRef<'a> { + /// Creates a new subtree with the given key and a pointer to the child. + #[inline] + pub(crate) const fn new(key: &'a Nibbles, child: &'a B256) -> Self { + Self { key, child } + } + + pub(crate) fn root(&self) -> B256 { + let mut tree_root = + Fr::from_repr_vartime(self.child.0).expect("child is a valid field element"); + for bit in self.key.as_slice().iter().rev() { + tree_root = if *bit == 0 { + hash_with_domain(&[tree_root, Fr::zero()], BRANCH_NODE_LBRT_DOMAIN) + } else { + hash_with_domain(&[Fr::zero(), tree_root], BRANCH_NODE_LTRB_DOMAIN) + }; + } + tree_root.to_repr().into() + } +} + +impl fmt::Debug for SubTreeRef<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SubTreeRef") + .field("key", &self.key) + .field("node", &hex::encode(self.child)) + .finish() + } +}
diff --git reth/crates/scroll/txpool/Cargo.toml scroll-reth/crates/scroll/txpool/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ede06079cf92ff3a3d029c4c47f1c088f6d254cf --- /dev/null +++ scroll-reth/crates/scroll/txpool/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "reth-scroll-txpool" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# ethereum +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true + +# reth +reth-chainspec.workspace = true +reth-primitives-traits.workspace = true +reth-revm.workspace = true +reth-storage-api.workspace = true +reth-transaction-pool.workspace = true + +# revm-scroll +revm-scroll.workspace = true + +# reth-scroll +reth-scroll-consensus.workspace = true +reth-scroll-evm.workspace = true +reth-scroll-forks.workspace = true +reth-scroll-primitives.workspace = true + +# scroll-alloy +scroll-alloy-consensus.workspace = true + +# misc +c-kzg.workspace = true +derive_more.workspace = true +parking_lot.workspace = true +tracing.workspace = true + +[dev-dependencies] +reth-scroll-chainspec.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] }
diff --git reth/crates/scroll/txpool/src/lib.rs scroll-reth/crates/scroll/txpool/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a1885e05acb3cd8a06c49e0daa63bdcba9ff2181 --- /dev/null +++ scroll-reth/crates/scroll/txpool/src/lib.rs @@ -0,0 +1,16 @@ +//! Transaction pool for Scroll node. + +mod transaction; +pub use transaction::ScrollPooledTransaction; + +mod validator; +pub use validator::{ScrollL1BlockInfo, ScrollTransactionValidator}; + +use reth_transaction_pool::{CoinbaseTipOrdering, Pool, TransactionValidationTaskExecutor}; + +/// Type alias for default scroll transaction pool +pub type ScrollTransactionPool<Client, S, T = ScrollPooledTransaction> = Pool< + TransactionValidationTaskExecutor<ScrollTransactionValidator<Client, T>>, + CoinbaseTipOrdering<T>, + S, +>;
diff --git reth/crates/scroll/txpool/src/transaction.rs scroll-reth/crates/scroll/txpool/src/transaction.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0fdf7b66c1671086538b75c36397ba988136177 --- /dev/null +++ scroll-reth/crates/scroll/txpool/src/transaction.rs @@ -0,0 +1,263 @@ +use alloy_consensus::{transaction::Recovered, BlobTransactionValidationError, Typed2718}; +use alloy_eips::{ + eip2930::AccessList, eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization, +}; +use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256}; +use c_kzg::KzgSettings; +use core::fmt::Debug; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use reth_scroll_primitives::ScrollTransactionSigned; +use reth_transaction_pool::{ + EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, PoolTransaction, +}; +use scroll_alloy_consensus::ScrollTransaction; +use std::sync::{Arc, OnceLock}; + +/// Pool transaction for Scroll. +/// +/// This type wraps the actual transaction and caches values that are frequently used by the pool. +/// For payload building this lazily tracks values that are required during payload building: +/// - Estimated compressed size of this transaction +#[derive(Debug, Clone, derive_more::Deref)] +pub struct ScrollPooledTransaction< + Cons = ScrollTransactionSigned, + Pooled = scroll_alloy_consensus::ScrollPooledTransaction, +> { + #[deref] + inner: EthPooledTransaction<Cons>, + /// The pooled transaction type. + _pd: core::marker::PhantomData<Pooled>, + + /// Cached EIP-2718 encoded bytes of the transaction, lazily computed. + encoded_2718: OnceLock<Bytes>, +} + +impl<Cons: SignedTransaction, Pooled> ScrollPooledTransaction<Cons, Pooled> { + /// Create new instance of [Self]. + pub fn new(transaction: Recovered<Cons>, encoded_length: usize) -> Self { + Self { + inner: EthPooledTransaction::new(transaction, encoded_length), + _pd: core::marker::PhantomData, + encoded_2718: Default::default(), + } + } + + /// Returns lazily computed EIP-2718 encoded bytes of the transaction. 
+ pub fn encoded_2718(&self) -> &Bytes { + self.encoded_2718.get_or_init(|| self.inner.transaction().encoded_2718().into()) + } +} + +impl<Cons, Pooled> PoolTransaction for ScrollPooledTransaction<Cons, Pooled> +where + Cons: SignedTransaction + From<Pooled>, + Pooled: SignedTransaction + TryFrom<Cons, Error: core::error::Error>, +{ + type TryFromConsensusError = <Pooled as TryFrom<Cons>>::Error; + type Consensus = Cons; + type Pooled = Pooled; + + fn clone_into_consensus(&self) -> Recovered<Self::Consensus> { + self.inner.transaction().clone() + } + + fn into_consensus(self) -> Recovered<Self::Consensus> { + self.inner.transaction + } + + fn from_pooled(tx: Recovered<Self::Pooled>) -> Self { + let encoded_len = tx.encode_2718_len(); + Self::new(tx.convert(), encoded_len) + } + + fn hash(&self) -> &TxHash { + self.inner.transaction.tx_hash() + } + + fn sender(&self) -> Address { + self.inner.transaction.signer() + } + + fn sender_ref(&self) -> &Address { + self.inner.transaction.signer_ref() + } + + fn cost(&self) -> &U256 { + &self.inner.cost + } + + fn encoded_length(&self) -> usize { + self.inner.encoded_length + } +} + +impl<Cons: Typed2718, Pooled> Typed2718 for ScrollPooledTransaction<Cons, Pooled> { + fn ty(&self) -> u8 { + self.inner.ty() + } +} + +impl<Cons: InMemorySize, Pooled> InMemorySize for ScrollPooledTransaction<Cons, Pooled> { + fn size(&self) -> usize { + self.inner.size() + } +} + +impl<Cons, Pooled> alloy_consensus::Transaction for ScrollPooledTransaction<Cons, Pooled> +where + Cons: alloy_consensus::Transaction, + Pooled: Debug + Send + Sync + 'static, +{ + fn chain_id(&self) -> Option<u64> { + self.inner.chain_id() + } + + fn nonce(&self) -> u64 { + self.inner.nonce() + } + + fn gas_limit(&self) -> u64 { + self.inner.gas_limit() + } + + fn gas_price(&self) -> Option<u128> { + self.inner.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.inner.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + self.inner.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + self.inner.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.inner.priority_fee_or_price() + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + self.inner.effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.inner.is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.inner.kind() + } + + fn is_create(&self) -> bool { + self.inner.is_create() + } + + fn value(&self) -> U256 { + self.inner.value() + } + + fn input(&self) -> &Bytes { + self.inner.input() + } + + fn access_list(&self) -> Option<&AccessList> { + self.inner.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.inner.blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.inner.authorization_list() + } +} + +impl<Cons, Pooled> EthPoolTransaction for ScrollPooledTransaction<Cons, Pooled> +where + Cons: SignedTransaction + From<Pooled>, + Pooled: SignedTransaction + TryFrom<Cons>, + <Pooled as TryFrom<Cons>>::Error: core::error::Error, +{ + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + EthBlobTransactionSidecar::None + } + + fn try_into_pooled_eip4844( + self, + _sidecar: Arc<BlobTransactionSidecarVariant>, + ) -> Option<Recovered<Self::Pooled>> { + None + } + + fn try_from_eip4844( + _tx: Recovered<Self::Consensus>, + _sidecar: BlobTransactionSidecarVariant, + ) -> Option<Self> { + None + } + + fn validate_blob( + 
&self, + _sidecar: &BlobTransactionSidecarVariant, + _settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError> { + Err(BlobTransactionValidationError::NotBlobTransaction(self.ty())) + } +} + +impl<Cons: ScrollTransaction, Pooled> ScrollTransaction for ScrollPooledTransaction<Cons, Pooled> { + fn is_l1_message(&self) -> bool { + self.transaction.is_l1_message() + } + + fn queue_index(&self) -> Option<u64> { + self.transaction.queue_index() + } +} + +#[cfg(test)] +mod tests { + use crate::{ScrollPooledTransaction, ScrollTransactionValidator}; + use alloy_consensus::{transaction::Recovered, Signed}; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::Signature; + use reth_provider::test_utils::MockEthProvider; + use reth_scroll_chainspec::SCROLL_MAINNET; + use reth_scroll_primitives::ScrollTransactionSigned; + use reth_transaction_pool::{ + blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, TransactionOrigin, + TransactionValidationOutcome, + }; + use scroll_alloy_consensus::{ScrollTypedTransaction, TxL1Message}; + #[test] + fn validate_scroll_transaction() { + let client = MockEthProvider::default().with_chain_spec(SCROLL_MAINNET.clone()); + let validator = EthTransactionValidatorBuilder::new(client) + .no_shanghai() + .no_cancun() + .build(InMemoryBlobStore::default()); + let validator = ScrollTransactionValidator::new(validator); + + let origin = TransactionOrigin::External; + let signer = Default::default(); + let deposit_tx = ScrollTypedTransaction::L1Message(TxL1Message::default()); + let signature = Signature::test_signature(); + let signed_tx: ScrollTransactionSigned = Signed::new_unhashed(deposit_tx, signature).into(); + let signed_recovered = Recovered::new_unchecked(signed_tx, signer); + let len = signed_recovered.encode_2718_len(); + let pooled_tx: ScrollPooledTransaction = + ScrollPooledTransaction::new(signed_recovered, len); + let outcome = validator.validate_one(origin, pooled_tx); + + let err = match outcome { + TransactionValidationOutcome::Invalid(_, err) => err, + _ => panic!("Expected invalid transaction"), + }; + assert_eq!(err.to_string(), "transaction type not supported"); + } +}
diff --git reth/crates/scroll/txpool/src/validator.rs scroll-reth/crates/scroll/txpool/src/validator.rs new file mode 100644 index 0000000000000000000000000000000000000000..65cead657eeb5c9f6f6f4d0eb4d92e9e2fefbd2d --- /dev/null +++ scroll-reth/crates/scroll/txpool/src/validator.rs @@ -0,0 +1,276 @@ +use alloy_consensus::BlockHeader; +use alloy_eips::Encodable2718; +use parking_lot::RwLock; +use reth_chainspec::ChainSpecProvider; +use reth_primitives_traits::{ + transaction::error::InvalidTransactionError, Block, GotExpected, SealedBlock, +}; +use reth_revm::database::StateProviderDatabase; +use reth_scroll_consensus::MAX_ROLLUP_FEE; +use reth_scroll_evm::{ + compute_compressed_size, compute_compression_ratio, spec_id_at_timestamp_and_number, + RethL1BlockInfo, +}; +use reth_scroll_forks::ScrollHardforks; +use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; +use reth_transaction_pool::{ + EthPoolTransaction, EthTransactionValidator, TransactionOrigin, TransactionValidationOutcome, + TransactionValidator, +}; +use revm_scroll::l1block::L1BlockInfo; +use scroll_alloy_consensus::ScrollTransaction; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; + +/// Tracks additional infos for the current block. +#[derive(Debug, Default)] +pub struct ScrollL1BlockInfo { + /// The current L1 block info. + l1_block_info: RwLock<L1BlockInfo>, + /// Current block timestamp. + timestamp: AtomicU64, + /// Current block number. + number: AtomicU64, +} + +/// Validator for Scroll transactions. +#[derive(Debug)] +pub struct ScrollTransactionValidator<Client, Tx> { + /// The type that performs the actual validation. + inner: EthTransactionValidator<Client, Tx>, + /// Additional block info required for validation. + block_info: Arc<ScrollL1BlockInfo>, + /// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee + /// derived from the tracked L1 block info. + require_l1_data_gas_fee: bool, +} + +impl<Client, Tx> ScrollTransactionValidator<Client, Tx> { + /// Returns the configured chain spec + pub fn chain_spec(&self) -> Arc<Client::ChainSpec> + where + Client: ChainSpecProvider, + { + self.inner.chain_spec() + } + + /// Returns the configured client + pub const fn client(&self) -> &Client { + self.inner.client() + } + + /// Returns the current block timestamp. + fn block_timestamp(&self) -> u64 { + self.block_info.timestamp.load(Ordering::Relaxed) + } + + /// Returns the current block number. + fn block_number(&self) -> u64 { + self.block_info.number.load(Ordering::Relaxed) + } + + /// Whether to ensure that the transaction's sender has enough balance to also cover the L1 gas + /// fee. + pub fn require_l1_data_gas_fee(self, require_l1_data_gas_fee: bool) -> Self { + Self { require_l1_data_gas_fee, ..self } + } + + /// Returns whether this validator also requires the transaction's sender to have enough balance + /// to cover the L1 gas fee. + pub const fn requires_l1_data_gas_fee(&self) -> bool { + self.require_l1_data_gas_fee + } +} + +impl<Client, Tx> ScrollTransactionValidator<Client, Tx> +where + Client: ChainSpecProvider<ChainSpec: ScrollHardforks> + StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction + ScrollTransaction, +{ + /// Create a new [`ScrollTransactionValidator`]. 
+ pub fn new(inner: EthTransactionValidator<Client, Tx>) -> Self { + let this = Self::with_block_info(inner, ScrollL1BlockInfo::default()); + if let Ok(Some(block)) = + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) + { + this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed); + this.block_info.number.store(block.header().number(), Ordering::Relaxed); + this.update_l1_block_info(block.header()); + } + + this + } + + /// Create a new [`ScrollTransactionValidator`] with the given [`ScrollL1BlockInfo`]. + pub fn with_block_info( + inner: EthTransactionValidator<Client, Tx>, + block_info: ScrollL1BlockInfo, + ) -> Self { + Self { inner, block_info: Arc::new(block_info), require_l1_data_gas_fee: true } + } + + /// Update the L1 block info for the given header and system transaction, if any. + pub fn update_l1_block_info<H>(&self, header: &H) + where + H: BlockHeader, + { + self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed); + self.block_info.number.store(header.number(), Ordering::Relaxed); + + let provider = + self.client().state_by_block_number_or_tag(header.number().into()).expect("msg"); + let mut db = StateProviderDatabase::new(provider); + let spec_id = + spec_id_at_timestamp_and_number(header.timestamp(), header.number(), self.chain_spec()); + if let Ok(l1_block_info) = L1BlockInfo::try_fetch(&mut db, spec_id) { + *self.block_info.l1_block_info.write() = l1_block_info; + } + } + + /// Validates a single transaction. + /// + /// See also [`TransactionValidator::validate_transaction`] + /// + /// This behaves the same as [`EthTransactionValidator::validate_one`], but in addition, ensures + /// that the account has enough balance to cover the L1 gas cost. + pub fn validate_one( + &self, + origin: TransactionOrigin, + transaction: Tx, + ) -> TransactionValidationOutcome<Tx> { + if transaction.is_eip4844() { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::Eip4844Disabled.into(), + ) + } + if transaction.is_l1_message() { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } + + let outcome = self.inner.validate_one(origin, transaction); + if outcome.is_invalid() || outcome.is_error() { + tracing::trace!(target: "scroll_txpool", ?outcome, "tx pool validation failed") + } + + if !self.requires_l1_data_gas_fee() { + // no need to check L1 gas fee + return outcome + } + + // ensure that the account has enough balance to cover the L1 gas cost + if let TransactionValidationOutcome::Valid { + balance, + state_nonce, + transaction: valid_tx, + propagate, + bytecode_hash, + authorities, + } = outcome + { + let mut l1_block_info = self.block_info.l1_block_info.read().clone(); + + let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); + let tx = valid_tx.transaction().clone_into_consensus(); + tx.encode_2718(&mut encoded); + + // Note, compression ratio is computed on tx.input, + // while compressed size is computed on the full encoded transaction. 
+ let compression_ratio = compute_compression_ratio(valid_tx.transaction().input()); + let compressed_size = compute_compressed_size(&encoded); + + let cost_addition = match l1_block_info.l1_tx_data_fee( + self.chain_spec(), + self.block_timestamp(), + self.block_number(), + &encoded, + Some((compression_ratio, compressed_size)), + false, + ) { + Ok(cost) => cost, + Err(err) => { + return TransactionValidationOutcome::Error(*valid_tx.hash(), Box::new(err)) + } + }; + // Check rollup fee is under u64::MAX. + if cost_addition >= MAX_ROLLUP_FEE { + return TransactionValidationOutcome::Invalid( + valid_tx.into_transaction(), + InvalidTransactionError::GasUintOverflow.into(), + ) + } + + let cost = valid_tx.transaction().cost().saturating_add(cost_addition); + + // Checks for max cost + if cost > balance { + return TransactionValidationOutcome::Invalid( + valid_tx.into_transaction(), + InvalidTransactionError::InsufficientFunds( + GotExpected { got: balance, expected: cost }.into(), + ) + .into(), + ) + } + + return TransactionValidationOutcome::Valid { + balance, + state_nonce, + bytecode_hash, + transaction: valid_tx, + propagate, + authorities, + } + } + + outcome + } + + /// Validates all given transactions. + /// + /// Returns all outcomes for the given transactions in the same order. + /// + /// See also [`Self::validate_one`] + pub fn validate_all( + &self, + transactions: Vec<(TransactionOrigin, Tx)>, + ) -> Vec<TransactionValidationOutcome<Tx>> { + transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)).collect() + } +} + +impl<Client, Tx> TransactionValidator for ScrollTransactionValidator<Client, Tx> +where + Client: ChainSpecProvider<ChainSpec: ScrollHardforks> + StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction + ScrollTransaction, +{ + type Transaction = Tx; + + async fn validate_transaction( + &self, + origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome<Self::Transaction> { + self.validate_one(origin, transaction) + } + + async fn validate_transactions( + &self, + transactions: Vec<(TransactionOrigin, Self::Transaction)>, + ) -> Vec<TransactionValidationOutcome<Self::Transaction>> { + self.validate_all(transactions) + } + + fn on_new_head_block<B>(&self, new_tip_block: &SealedBlock<B>) + where + B: Block, + { + self.inner.on_new_head_block(new_tip_block); + self.update_l1_block_info(new_tip_block.header()); + } +}
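Summarising the funds check in `validate_one` above: the L1 data fee derived from the tracked `L1BlockInfo` is first checked against the rollup fee cap and then added to the regular execution cost before being compared with the sender balance. A minimal sketch of that arithmetic with illustrative names (`MAX_ROLLUP_FEE` itself is defined in `reth-scroll-consensus` and not shown in this diff):

```rust
use alloy_primitives::U256;

/// Sketch of the balance check in `validate_one`: reject when the rollup
/// (L1 data) fee reaches the cap, otherwise require the balance to cover the
/// execution cost plus the L1 data fee.
fn check_funds(
    balance: U256,
    execution_cost: U256,
    l1_data_fee: U256,
    max_rollup_fee: U256,
) -> Result<(), &'static str> {
    if l1_data_fee >= max_rollup_fee {
        return Err("rollup fee overflow");
    }
    let total_cost = execution_cost.saturating_add(l1_data_fee);
    if total_cost > balance {
        return Err("insufficient funds");
    }
    Ok(())
}
```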
diff --git reth/crates/ethereum/cli/Cargo.toml scroll-reth/crates/ethereum/cli/Cargo.toml index 01a7751e77b36947753fdc17777180f4117eabc9..e232ea0cdb1d7594294a7ea5d67d6d683d6996a0 100644 --- reth/crates/ethereum/cli/Cargo.toml +++ scroll-reth/crates/ethereum/cli/Cargo.toml @@ -35,7 +35,9 @@ # fs tempfile.workspace = true   [features] -default = ["jemalloc"] +default = ["jemalloc", "otlp"] + +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"]   dev = ["reth-cli-commands/arbitrary"]
diff --git reth/crates/ethereum/cli/src/app.rs scroll-reth/crates/ethereum/cli/src/app.rs index e99dae2ac771938b0da2d273def182ab54af1a92..805c91442575cd6b8c419e2d7ff4c8c724449c08 100644 --- reth/crates/ethereum/cli/src/app.rs +++ scroll-reth/crates/ethereum/cli/src/app.rs @@ -111,7 +111,18 @@ /// /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); }
diff --git reth/crates/ethereum/cli/src/interface.rs scroll-reth/crates/ethereum/cli/src/interface.rs index 8f09b165e83571957c8a7ae76689782bf48ef93d..bb687df684a48236cdf5e6a96c9cf72ca130757f 100644 --- reth/crates/ethereum/cli/src/interface.rs +++ scroll-reth/crates/ethereum/cli/src/interface.rs @@ -18,7 +18,10 @@ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; use reth_tracing::FileWorkerGuard; @@ -42,6 +45,10 @@ /// The logging configuration for the CLI. #[command(flatten)] pub logs: LogArgs, + + /// The tracing configuration for the CLI. + #[command(flatten)] + pub traces: TraceArgs,   /// Type marker for the RPC module validator #[arg(skip)] @@ -212,8 +219,11 @@ /// Initializes tracing with the configured options. /// /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. + /// If an OTLP endpoint is specified, it will export metrics to the configured collector. pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> { - let guard = self.logs.init_tracing()?; + let layers = reth_tracing::Layers::new(); + + let guard = self.logs.init_tracing_with_layers(layers)?; Ok(guard) } } @@ -302,7 +312,15 @@ use reth_node_core::args::ColorMode;   #[test] fn parse_color_mode() { - let reth = Cli::try_parse_args_from(["reth", "node", "--color", "always"]).unwrap(); + let reth = Cli::try_parse_args_from([ + "reth", + "node", + "--color", + "always", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); assert_eq!(reth.logs.color, ColorMode::Always); }   @@ -329,7 +347,8 @@ /// Tests that the log directory is parsed correctly when using the node command. It's /// always tied to the specific chain's name. #[test] fn parse_logs_path_node() { - let mut reth = Cli::try_parse_args_from(["reth", "node"]).unwrap(); + let mut reth = + Cli::try_parse_args_from(["reth", "node", "--builder.gaslimit", "10000000"]).unwrap(); if let Some(chain_spec) = reth.command.chain_spec() { reth.logs.log_file_directory = reth.logs.log_file_directory.join(chain_spec.chain.to_string()); @@ -341,7 +360,15 @@ let mut iter = SUPPORTED_CHAINS.iter(); iter.next(); for chain in iter { - let mut reth = Cli::try_parse_args_from(["reth", "node", "--chain", chain]).unwrap(); + let mut reth = Cli::try_parse_args_from([ + "reth", + "node", + "--chain", + chain, + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); let chain = reth.command.chain_spec().map(|c| c.chain.to_string()).unwrap_or(String::new()); reth.logs.log_file_directory = reth.logs.log_file_directory.join(chain.clone());
diff --git reth/crates/ethereum/consensus/src/lib.rs scroll-reth/crates/ethereum/consensus/src/lib.rs index 3c0021fc2d2a4afe36a5115bb188ab16fe0e819e..00cf303e25dba8d524f61066f12e0e743095a189 100644 --- reth/crates/ethereum/consensus/src/lib.rs +++ scroll-reth/crates/ethereum/consensus/src/lib.rs @@ -29,7 +29,7 @@ Block, BlockHeader, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, };   mod validation; -pub use validation::validate_block_post_execution; +pub use validation::{validate_block_post_execution, verify_receipts};   /// Ethereum beacon consensus /// @@ -196,6 +196,7 @@ #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_consensus_common::validation::validate_against_parent_gas_limit; @@ -215,7 +216,7 @@ let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit((parent.gas_limit + 5) as u64);   assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Ok(()) ); } @@ -226,7 +227,7 @@ let parent = header_with_gas_limit(MINIMUM_GAS_LIMIT); let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1);   assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit as u64 }) ); } @@ -239,7 +240,7 @@ parent.gas_limit + parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1, );   assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, @@ -253,7 +254,7 @@ let parent = header_with_gas_limit(GAS_LIMIT_BOUND_DIVISOR * 10); let child = header_with_gas_limit(parent.gas_limit - 5);   assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Ok(()) ); } @@ -266,7 +267,7 @@ parent.gas_limit - parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1, );   assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit,
diff --git reth/crates/ethereum/consensus/src/validation.rs scroll-reth/crates/ethereum/consensus/src/validation.rs index 71affffeb0cde13354aab1073e66675d5da6405e..0f88c195cf6576da35036ecb04ec169d53af4422 100644 --- reth/crates/ethereum/consensus/src/validation.rs +++ scroll-reth/crates/ethereum/consensus/src/validation.rs @@ -70,7 +70,7 @@ }   /// Calculate the receipts root, and compare it against the expected receipts root and logs /// bloom. -fn verify_receipts<R: Receipt>( +pub fn verify_receipts<R: Receipt>( expected_receipts_root: B256, expected_logs_bloom: Bloom, receipts: &[R],
diff --git reth/crates/ethereum/evm/src/build.rs scroll-reth/crates/ethereum/evm/src/build.rs index 5f5e014d29705452a28883bb6b24c3453165fd46..85d4cae311bb4067f42661e1aa60baff752f2820 100644 --- reth/crates/ethereum/evm/src/build.rs +++ scroll-reth/crates/ethereum/evm/src/build.rs @@ -1,7 +1,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_consensus::{ proofs::{self, calculate_receipt_root}, - Block, BlockBody, BlockHeader, Header, Transaction, TxReceipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, BlockHeader, Header, TxReceipt, EMPTY_OMMER_ROOT_HASH, }; use alloy_eips::merge::BEACON_NONCE; use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx}; @@ -10,6 +10,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{logs_bloom, Receipt, SignedTransaction}; +use revm::context::Block as _;   /// Block builder for Ethereum. #[derive(Debug, Clone)] @@ -47,12 +48,12 @@ evm_env, execution_ctx: ctx, parent, transactions, - output: BlockExecutionResult { receipts, requests, gas_used }, + output: BlockExecutionResult { receipts, requests, gas_used, blob_gas_used }, state_root, .. } = input;   - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to();   let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root( @@ -73,12 +74,11 @@ .is_prague_active_at_timestamp(timestamp) .then(|| requests.requests_hash());   let mut excess_blob_gas = None; - let mut blob_gas_used = None; + let mut block_blob_gas_used = None;   // only determine cancun fields when active if self.chain_spec.is_cancun_active_at_timestamp(timestamp) { - blob_gas_used = - Some(transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum()); + block_blob_gas_used = Some(*blob_gas_used); excess_blob_gas = if self.chain_spec.is_cancun_active_at_timestamp(parent.timestamp) { parent.maybe_next_block_excess_blob_gas( self.chain_spec.blob_params_at_timestamp(timestamp), @@ -96,23 +96,23 @@ let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: self.extra_data.clone(), parent_beacon_block_root: ctx.parent_beacon_block_root, - blob_gas_used, + blob_gas_used: block_blob_gas_used, excess_blob_gas, requests_hash, };
diff --git reth/crates/ethereum/evm/src/lib.rs scroll-reth/crates/ethereum/evm/src/lib.rs index eaf91f0c7beed83babdf0c377bc4fc4e871f486b..c0f8adc9c54172ffd2c330c201b07215cef7e6fe 100644 --- reth/crates/ethereum/evm/src/lib.rs +++ scroll-reth/crates/ethereum/evm/src/lib.rs @@ -132,6 +132,7 @@ Tx: TransactionEnv + FromRecoveredTx<TransactionSigned> + FromTxWithEncoded<TransactionSigned>, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug @@ -154,7 +155,7 @@ fn block_assembler(&self) -> &Self::BlockAssembler { &self.block_assembler }   - fn evm_env(&self, header: &Header) -> Result<EvmEnv, Self::Error> { + fn evm_env(&self, header: &Header) -> Result<EvmEnv<SpecId>, Self::Error> { Ok(EvmEnv::for_eth_block( header, self.chain_spec(), @@ -217,6 +218,7 @@ Tx: TransactionEnv + FromRecoveredTx<TransactionSigned> + FromTxWithEncoded<TransactionSigned>, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug
diff --git reth/crates/ethereum/evm/src/test_utils.rs scroll-reth/crates/ethereum/evm/src/test_utils.rs index 87875dbc848a63e796afd69bce72218d895d3876..fe791b9f5fd19ea102759eff74b976439afd9a6c 100644 --- reth/crates/ethereum/evm/src/test_utils.rs +++ scroll-reth/crates/ethereum/evm/src/test_utils.rs @@ -125,6 +125,7 @@ reqs.extend(req); reqs }), gas_used: 0, + blob_gas_used: 0, };   evm.db_mut().bundle_state = bundle;
diff --git reth/crates/ethereum/hardforks/src/display.rs scroll-reth/crates/ethereum/hardforks/src/display.rs index e40a117d26a3bb990c9625c3adaf56a2f4c344c0..7eda386a3cc26e3efa55a60d5fc7f2aaadc31224 100644 --- reth/crates/ethereum/hardforks/src/display.rs +++ scroll-reth/crates/ethereum/hardforks/src/display.rs @@ -119,7 +119,7 @@ format( "Pre-merge hard forks (block based)", &self.pre_merge, - self.with_merge.is_empty(), + self.with_merge.is_empty() && self.post_merge.is_empty(), f, )?;
diff --git reth/crates/ethereum/node/src/node.rs scroll-reth/crates/ethereum/node/src/node.rs index 089353f6b73f2ad71c842811c1b612b5b1bfa755..881539923ae3499b375007d67de1f19c861d52b0 100644 --- reth/crates/ethereum/node/src/node.rs +++ scroll-reth/crates/ethereum/node/src/node.rs @@ -15,7 +15,6 @@ }; use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, - SpecFor, TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ @@ -159,10 +158,9 @@ >, NetworkT: RpcTypes<TransactionRequest: SignableTxRequest<TxTy<N::Types>>>, EthRpcConverterFor<N, NetworkT>: RpcConvert< Primitives = PrimitivesTy<N::Types>, - TxEnv = TxEnvFor<N::Evm>, Error = EthApiError, Network = NetworkT, - Spec = SpecFor<N::Evm>, + Evm = N::Evm, >, EthApiError: FromEvmError<N::Evm>, { @@ -530,7 +528,7 @@ ctx: &BuilderContext<Node>, pool: Pool, ) -> eyre::Result<Self::Network> { let network = ctx.network_builder().await?; - let handle = ctx.start_network(network, pool); + let handle = ctx.start_network(network, pool, None); info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) }
diff --git reth/crates/ethereum/node/tests/e2e/dev.rs scroll-reth/crates/ethereum/node/tests/e2e/dev.rs index 5ccd74ecb2472e2e383108fe2752e75ca6ec811d..eb69452449f5a06484435572dfb7c6062337bb7e 100644 --- reth/crates/ethereum/node/tests/e2e/dev.rs +++ scroll-reth/crates/ethereum/node/tests/e2e/dev.rs @@ -3,8 +3,11 @@ use alloy_genesis::Genesis; use alloy_primitives::{b256, hex, Address}; use futures::StreamExt; use reth_chainspec::ChainSpec; -use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeTypes}; -use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeBuilder, NodeConfig, NodeHandle}; +use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeAddOns, NodeTypes}; +use reth_node_builder::{ + rpc::{RethRpcAddOns, RpcHandleProvider}, + FullNode, NodeBuilder, NodeConfig, NodeHandle, +}; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; @@ -82,6 +85,7 @@ where N: FullNodeComponents<Provider: CanonStateSubscriptions>, AddOns: RethRpcAddOns<N, EthApi: EthTransactions>, N::Types: NodeTypes<Primitives: FullNodePrimitives>, + <AddOns as NodeAddOns<N>>::Handle: RpcHandleProvider<N, <AddOns as RethRpcAddOns<N>>::EthApi>, { let mut notifications = node.provider.canonical_state_stream();   @@ -90,7 +94,7 @@ let raw_tx = hex!( "02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090" );   - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.rpc_handle().rpc_registry.eth_api();   let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap();
diff --git reth/crates/ethereum/node/tests/e2e/pool.rs scroll-reth/crates/ethereum/node/tests/e2e/pool.rs index 9187cb61405bf4e5453ff99c13cb613c9ff36d6c..027b59a7ae9479f0d33a87a7f5e1b4be927d8a46 100644 --- reth/crates/ethereum/node/tests/e2e/pool.rs +++ scroll-reth/crates/ethereum/node/tests/e2e/pool.rs @@ -45,7 +45,7 @@ .cancun_activated() .build(), ); let node_config = NodeConfig::test() - .with_chain(chain_spec) + .with_chain(chain_spec.clone()) .with_unused_ports() .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) @@ -67,6 +67,7 @@ executor.spawn_critical( "txpool maintenance task", reth_transaction_pool::maintain::maintain_transaction_pool_future( node.inner.provider.clone(), + chain_spec, txpool.clone(), node.inner.provider.clone().canonical_state_stream(), executor.clone(), @@ -120,7 +121,7 @@ .build(), ); let genesis_hash = chain_spec.genesis_hash(); let node_config = NodeConfig::test() - .with_chain(chain_spec) + .with_chain(chain_spec.clone()) .with_unused_ports() .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) @@ -139,6 +140,7 @@ executor.spawn_critical( "txpool maintenance task", reth_transaction_pool::maintain::maintain_transaction_pool_future( node.inner.provider.clone(), + chain_spec, txpool.clone(), node.inner.provider.clone().canonical_state_stream(), executor.clone(), @@ -269,6 +271,7 @@ executor.spawn_critical( "txpool maintenance task", reth_transaction_pool::maintain::maintain_transaction_pool_future( node.inner.provider.clone(), + MAINNET.clone(), txpool.clone(), node.inner.provider.clone().canonical_state_stream(), executor.clone(),
diff --git reth/crates/ethereum/payload/src/lib.rs scroll-reth/crates/ethereum/payload/src/lib.rs index 8c969c9d44c7112f2fde8dc2226f78d51120ddbe..7f40e983bc83aa808033b4459570032d3f2b213b 100644 --- reth/crates/ethereum/payload/src/lib.rs +++ scroll-reth/crates/ethereum/payload/src/lib.rs @@ -176,8 +176,8 @@ let chain_spec = client.chain_spec();   debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit: u64 = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee();   let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee,
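As in the build.rs hunks, the payload builder now reads the gas limit and base fee through accessor methods rather than `BlockEnv` fields. Presumably this is what lets the block environment sit behind a trait (with the new `BlockEnv = BlockEnv` bounds pinning the standard type for Ethereum and OP elsewhere in this diff), so a fork-specific environment can be swapped in. A sketch of the idea with a stand-in trait; the real one is `revm::context::Block`:

```rust
/// Stand-in for the accessor trait; only the two methods used above are modelled.
trait Block {
    fn gas_limit(&self) -> u64;
    fn basefee(&self) -> u64;
}

struct EthBlockEnv {
    gas_limit: u64,
    basefee: u64,
}

struct ScrollBlockEnv {
    gas_limit: u64,
    basefee: u64,
    // ... fork-specific fields would live here ...
}

impl Block for EthBlockEnv {
    fn gas_limit(&self) -> u64 {
        self.gas_limit
    }
    fn basefee(&self) -> u64 {
        self.basefee
    }
}

impl Block for ScrollBlockEnv {
    fn gas_limit(&self) -> u64 {
        self.gas_limit
    }
    fn basefee(&self) -> u64 {
        self.basefee
    }
}

/// Builder code written against the trait compiles for either environment.
fn payload_limits(env: &impl Block) -> (u64, u64) {
    (env.gas_limit(), env.basefee())
}

fn main() {
    assert_eq!(payload_limits(&EthBlockEnv { gas_limit: 30_000_000, basefee: 7 }), (30_000_000, 7));
    assert_eq!(payload_limits(&ScrollBlockEnv { gas_limit: 10_000_000, basefee: 1 }), (10_000_000, 1));
}
```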
diff --git reth/crates/optimism/chainspec/src/lib.rs scroll-reth/crates/optimism/chainspec/src/lib.rs index 2a78039dcf9af288388945547e86ff2b305af138..d5ff6d495d76335bff9f7b0d42884d853a1ac702 100644 --- reth/crates/optimism/chainspec/src/lib.rs +++ scroll-reth/crates/optimism/chainspec/src/lib.rs @@ -66,7 +66,8 @@ use alloy_primitives::{B256, U256}; use derive_more::{Constructor, Deref, From, Into}; use reth_chainspec::{ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, - DisplayHardforks, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, + DisplayHardforks, EthChainSpec, EthereumCapabilities, EthereumHardforks, ForkFilter, ForkId, + Hardforks, Head, }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; use reth_network_peers::NodeRecord; @@ -302,6 +303,8 @@ self.inner.next_block_base_fee(parent, target_timestamp) } } } + +impl EthereumCapabilities for OpChainSpec {}   impl Hardforks for OpChainSpec { fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
diff --git reth/crates/optimism/cli/Cargo.toml scroll-reth/crates/optimism/cli/Cargo.toml index 422da3b883e28e74b530e76664986678336f9bc8..6ed24ca5823b4762b86b46328cbc88ee9956e2fd 100644 --- reth/crates/optimism/cli/Cargo.toml +++ scroll-reth/crates/optimism/cli/Cargo.toml @@ -74,6 +74,11 @@ [build-dependencies] reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] }   [features] +default = ["otlp"] + +# OpenTelemetry feature to activate metrics export +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] + asm-keccak = [ "alloy-primitives/asm-keccak", "reth-node-core/asm-keccak",
diff --git reth/crates/optimism/cli/src/app.rs scroll-reth/crates/optimism/cli/src/app.rs index 1e9f7960ad1113fa27dd7331f4030430e3d2ad2d..891578cbe24ff6481179a0cf4a7df67b4406e549 100644 --- reth/crates/optimism/cli/src/app.rs +++ scroll-reth/crates/optimism/cli/src/app.rs @@ -116,7 +116,18 @@ /// /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); }
diff --git reth/crates/optimism/cli/src/lib.rs scroll-reth/crates/optimism/cli/src/lib.rs index b55bbed3ad403df5db5e191e0ffcfe8d19695cba..7dd985ce25f035e950686c563c3779ccc1d33d45 100644 --- reth/crates/optimism/cli/src/lib.rs +++ scroll-reth/crates/optimism/cli/src/lib.rs @@ -48,7 +48,10 @@ use reth_cli_commands::launcher::FnLauncher; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_optimism_node::args::RollupArgs;   // This allows us to manually enable node metrics features, required for proper jemalloc metric @@ -72,6 +75,10 @@ /// The logging configuration for the CLI. #[command(flatten)] pub logs: LogArgs, + + /// The metrics configuration for the CLI. + #[command(flatten)] + pub traces: TraceArgs,   /// Type marker for the RPC module validator #[arg(skip)] @@ -144,7 +151,12 @@ use reth_optimism_node::args::RollupArgs;   #[test] fn parse_dev() { - let cmd = NodeCommand::<OpChainSpecParser, NoArgs>::parse_from(["op-reth", "--dev"]); + let cmd = NodeCommand::<OpChainSpecParser, NoArgs>::parse_from([ + "op-reth", + "--dev", + "--builder.gaslimit", + "30000000", + ]); let chain = OP_DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash(), chain.genesis_hash()); @@ -193,8 +205,11 @@ "--rpc.max-subscriptions-per-connection", "10000", "--metrics", "9003", + "--tracing-otlp=http://localhost:4318/v1/traces", "--log.file.max-size", "100", + "--builder.gaslimit", + "10000000", ]);   match cmd.command {
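The new `TraceArgs` is attached to the CLI the same way `LogArgs` is, through clap's `flatten`, and the updated test exercises a `--tracing-otlp=<endpoint>` flag. A self-contained sketch of that wiring; the `TraceArgs` below is a hypothetical stand-in carrying only the flag the test shows, not reth's actual struct:

```rust
use clap::{Args, Parser};

/// Hypothetical stand-in for reth's `TraceArgs`; only the flag name exercised by the
/// test above is assumed here.
#[derive(Debug, Args)]
struct TraceArgs {
    /// OTLP endpoint to export spans to.
    #[arg(long = "tracing-otlp")]
    otlp: Option<String>,
}

#[derive(Debug, Parser)]
struct Cli {
    /// The tracing configuration for the CLI.
    #[command(flatten)]
    traces: TraceArgs,
}

fn main() {
    let cli = Cli::parse_from(["op-reth", "--tracing-otlp=http://localhost:4318/v1/traces"]);
    assert_eq!(cli.traces.otlp.as_deref(), Some("http://localhost:4318/v1/traces"));
}
```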
diff --git reth/crates/optimism/consensus/src/validation/isthmus.rs scroll-reth/crates/optimism/consensus/src/validation/isthmus.rs index 64d45eae5c8fddaccc81d0bca2f59ae1b47e8263..4703e10869e4bb1c8edd28bb808a228f3b006a05 100644 --- reth/crates/optimism/consensus/src/validation/isthmus.rs +++ scroll-reth/crates/optimism/consensus/src/validation/isthmus.rs @@ -4,7 +4,6 @@ use crate::OpConsensusError; use alloy_consensus::BlockHeader; use alloy_primitives::{address, Address, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use core::fmt::Debug; use reth_storage_api::{errors::ProviderResult, StorageRootProvider}; use reth_trie_common::HashedStorage; use revm::database::BundleState; @@ -72,7 +71,7 @@ header: H, ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; @@ -110,7 +109,7 @@ header: H, ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + core::fmt::Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;
diff --git reth/crates/optimism/evm/src/build.rs scroll-reth/crates/optimism/evm/src/build.rs index 087b7f10046fb0e2a0a229ecce47b1cf9bf43c41..edc877a9a5d92391c0c1672425b791813671c073 100644 --- reth/crates/optimism/evm/src/build.rs +++ scroll-reth/crates/optimism/evm/src/build.rs @@ -14,6 +14,7 @@ use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{Receipt, SignedTransaction}; +use revm::context::Block as _;   /// Block builder for Optimism. #[derive(Debug)] @@ -53,7 +54,7 @@ .. } = input; let ctx = ctx.into();   - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to();   let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = @@ -88,19 +89,19 @@ let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: ctx.extra_data, parent_beacon_block_root: ctx.parent_beacon_block_root,
diff --git reth/crates/optimism/evm/src/l1.rs scroll-reth/crates/optimism/evm/src/l1.rs index a538c8d8690a266e8afbcb6d3005204d88104f75..4165221c9878717df0a5e55add7adaf1c7fc859d 100644 --- reth/crates/optimism/evm/src/l1.rs +++ scroll-reth/crates/optimism/evm/src/l1.rs @@ -88,10 +88,12 @@ .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::FeeOverheadConversion))?; let l1_fee_scalar = U256::try_from_be_slice(&data[224..256]) .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::FeeScalarConversion))?;   - let mut l1block = L1BlockInfo::default(); - l1block.l1_base_fee = l1_base_fee; - l1block.l1_fee_overhead = Some(l1_fee_overhead); - l1block.l1_base_fee_scalar = l1_fee_scalar; + let l1block = L1BlockInfo { + l1_base_fee, + l1_fee_overhead: Some(l1_fee_overhead), + l1_base_fee_scalar: l1_fee_scalar, + ..Default::default() + };   Ok(l1block) } @@ -140,11 +142,13 @@ .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?; let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96]) .ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?;   - let mut l1block = L1BlockInfo::default(); - l1block.l1_base_fee = l1_base_fee; - l1block.l1_base_fee_scalar = l1_base_fee_scalar; - l1block.l1_blob_base_fee = Some(l1_blob_base_fee); - l1block.l1_blob_base_fee_scalar = Some(l1_blob_base_fee_scalar); + let l1block = L1BlockInfo { + l1_base_fee, + l1_base_fee_scalar, + l1_blob_base_fee: Some(l1_blob_base_fee), + l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), + ..Default::default() + };   Ok(l1block) } @@ -201,13 +205,15 @@ let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({ OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion) })?;   - let mut l1block = L1BlockInfo::default(); - l1block.l1_base_fee = l1_base_fee; - l1block.l1_base_fee_scalar = l1_base_fee_scalar; - l1block.l1_blob_base_fee = Some(l1_blob_base_fee); - l1block.l1_blob_base_fee_scalar = Some(l1_blob_base_fee_scalar); - l1block.operator_fee_scalar = Some(operator_fee_scalar); - l1block.operator_fee_constant = Some(operator_fee_constant); + let l1block = L1BlockInfo { + l1_base_fee, + l1_base_fee_scalar, + l1_blob_base_fee: Some(l1_blob_base_fee), + l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar), + operator_fee_scalar: Some(operator_fee_scalar), + operator_fee_constant: Some(operator_fee_constant), + ..Default::default() + };   Ok(l1block) }
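All three `L1BlockInfo` constructors above switch from `Default::default()` followed by field reassignment to a single struct literal with update syntax, the shape clippy's `field_reassign_with_default` lint nudges toward. A tiny self-contained illustration; the `L1BlockInfo` here is a stand-in with only a few of the fields from the hunk:

```rust
#[derive(Debug, Default, PartialEq)]
struct L1BlockInfo {
    l1_base_fee: u64,
    l1_fee_overhead: Option<u64>,
    l1_base_fee_scalar: u64,
}

fn main() {
    // Before: default value mutated field by field.
    let mut a = L1BlockInfo::default();
    a.l1_base_fee = 7;
    a.l1_fee_overhead = Some(1);

    // After: one struct literal, remaining fields filled in from `Default`.
    let b = L1BlockInfo { l1_base_fee: 7, l1_fee_overhead: Some(1), ..Default::default() };

    assert_eq!(a, b);
}
```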
diff --git reth/crates/optimism/evm/src/lib.rs scroll-reth/crates/optimism/evm/src/lib.rs index 2d598b9450121febcc4159ac5b27deb968e769e5..e5df16ee2e7509befcc6010385d4708f574a8a97 100644 --- reth/crates/optimism/evm/src/lib.rs +++ scroll-reth/crates/optimism/evm/src/lib.rs @@ -15,7 +15,7 @@ use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::Decodable2718; use alloy_evm::{EvmFactory, FromRecoveredTx, FromTxWithEncoded}; -use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; +use alloy_op_evm::block::{receipt_builder::OpReceiptBuilder, OpTxEnv}; use alloy_primitives::U256; use core::fmt::Debug; use op_alloy_consensus::EIP1559ParamError; @@ -131,9 +131,11 @@ R: OpReceiptBuilder<Receipt: DepositReceipt, Transaction: SignedTransaction>, EvmF: EvmFactory< Tx: FromRecoveredTx<R::Transaction> + FromTxWithEncoded<R::Transaction> - + TransactionEnv, + + TransactionEnv + + OpTxEnv, Precompiles = PrecompilesMap, Spec = OpSpecId, + BlockEnv = BlockEnv, > + Debug, Self: Send + Sync + Unpin + Clone + 'static, {
diff --git reth/crates/optimism/flashblocks/src/lib.rs scroll-reth/crates/optimism/flashblocks/src/lib.rs index e818e9cb538560bd2f7a591b38e17c5c9ab238ee..11647039930dfa5bd70aac8de038776104fe080a 100644 --- reth/crates/optimism/flashblocks/src/lib.rs +++ scroll-reth/crates/optimism/flashblocks/src/lib.rs @@ -4,7 +4,7 @@ pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, }; -pub use service::FlashBlockService; +pub use service::{FlashBlockBuildInfo, FlashBlockService}; pub use ws::{WsConnect, WsFlashBlockStream};   mod consensus; @@ -12,7 +12,7 @@ pub use consensus::FlashBlockConsensusClient; mod payload; pub use payload::PendingFlashBlock; mod sequence; -pub use sequence::FlashBlockCompleteSequence; +pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence};   mod service; mod worker; @@ -28,3 +28,6 @@ /// /// [`FlashBlock`]: crate::FlashBlock pub type FlashBlockCompleteSequenceRx = tokio::sync::broadcast::Receiver<FlashBlockCompleteSequence>; + +/// Receiver that signals whether a [`FlashBlock`] is currently being built. +pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver<Option<FlashBlockBuildInfo>>;
diff --git reth/crates/optimism/flashblocks/src/sequence.rs scroll-reth/crates/optimism/flashblocks/src/sequence.rs index 087f97db7bef1d3634ef8660d78d9cb92ccc308d..59d4cfecbcd24385c2c1307d33fecc1af41b4635 100644 --- reth/crates/optimism/flashblocks/src/sequence.rs +++ scroll-reth/crates/optimism/flashblocks/src/sequence.rs @@ -13,7 +13,7 @@ const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128;   /// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. #[derive(Debug)] -pub(crate) struct FlashBlockPendingSequence<T> { +pub struct FlashBlockPendingSequence<T> { /// tracks the individual flashblocks in order /// /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new @@ -29,7 +29,8 @@ impl<T> FlashBlockPendingSequence<T> where T: SignedTransaction, { - pub(crate) fn new() -> Self { + /// Create a new pending sequence. + pub fn new() -> Self { // Note: if the channel is full, send will not block but rather overwrite the oldest // messages. Order is preserved. let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); @@ -37,7 +38,7 @@ Self { inner: BTreeMap::new(), block_broadcaster: tx, state_root: None } }   /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.block_broadcaster.subscribe() }   @@ -70,7 +71,7 @@ /// Inserts a new block into the sequence. /// /// A [`FlashBlock`] with index 0 resets the set. - pub(crate) fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + pub fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { if flashblock.index == 0 { trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence");   @@ -93,7 +94,7 @@ Ok(()) }   /// Set state root - pub(crate) const fn set_state_root(&mut self, state_root: Option<B256>) { + pub const fn set_state_root(&mut self, state_root: Option<B256>) { self.state_root = state_root; }   @@ -103,9 +104,7 @@ /// A flashblocks is not ready if there's missing previous flashblocks, i.e. there's a gap in /// the sequence /// /// Note: flashblocks start at `index 0`. - pub(crate) fn ready_transactions( - &self, - ) -> impl Iterator<Item = WithEncoded<Recovered<T>>> + '_ { + pub fn ready_transactions(&self) -> impl Iterator<Item = WithEncoded<Recovered<T>>> + '_ { self.inner .values() .enumerate() @@ -117,28 +116,37 @@ .flat_map(|(_, block)| block.txs.clone()) }   /// Returns the first block number - pub(crate) fn block_number(&self) -> Option<u64> { + pub fn block_number(&self) -> Option<u64> { Some(self.inner.values().next()?.block().metadata.block_number) }   /// Returns the payload base of the first tracked flashblock. - pub(crate) fn payload_base(&self) -> Option<ExecutionPayloadBaseV1> { + pub fn payload_base(&self) -> Option<ExecutionPayloadBaseV1> { self.inner.values().next()?.block().base.clone() }   /// Returns the number of tracked flashblocks. - pub(crate) fn count(&self) -> usize { + pub fn count(&self) -> usize { self.inner.len() }   /// Returns the reference to the last flashblock. 
- pub(crate) fn last_flashblock(&self) -> Option<&FlashBlock> { + pub fn last_flashblock(&self) -> Option<&FlashBlock> { self.inner.last_key_value().map(|(_, b)| &b.block) }   /// Returns the current/latest flashblock index in the sequence - pub(crate) fn index(&self) -> Option<u64> { + pub fn index(&self) -> Option<u64> { Some(self.inner.values().last()?.block().index) + } +} + +impl<T> Default for FlashBlockPendingSequence<T> +where + T: SignedTransaction, +{ + fn default() -> Self { + Self::new() } }
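`FlashBlockPendingSequence` and most of its accessors are promoted from `pub(crate)` to `pub`, and it gains a `Default` impl, so code outside the crate can assemble flashblock sequences itself. A simplified model of the behaviour its doc comments describe, assuming nothing beyond the standard library:

```rust
use std::collections::BTreeMap;

/// Simplified stand-in for `FlashBlockPendingSequence`: flashblocks are kept ordered by
/// index, index 0 starts a fresh sequence, and only a gap-free prefix counts as "ready".
#[derive(Default)]
struct PendingSequence {
    inner: BTreeMap<u64, &'static str>,
}

impl PendingSequence {
    fn insert(&mut self, index: u64, payload: &'static str) {
        if index == 0 {
            // A flashblock with index 0 resets the set, mirroring the real `insert`.
            self.inner.clear();
        }
        self.inner.insert(index, payload);
    }

    /// Yields payloads only while indices are consecutive starting at 0.
    fn ready(&self) -> impl Iterator<Item = &'static str> + '_ {
        self.inner
            .iter()
            .enumerate()
            .take_while(|(pos, (idx, _))| *pos as u64 == **idx)
            .map(|(_, (_, payload))| *payload)
    }
}

fn main() {
    let mut seq = PendingSequence::default();
    seq.insert(0, "base");
    seq.insert(1, "delta-1");
    seq.insert(3, "delta-3"); // gap at index 2: not ready yet
    assert_eq!(seq.ready().count(), 2);
}
```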
diff --git reth/crates/optimism/flashblocks/src/service.rs scroll-reth/crates/optimism/flashblocks/src/service.rs index f4cf7f18450d9438f14cd304b0984fa4c009250e..7e442470d98ed77712b6d07cc5e726ff7dbf942b 100644 --- reth/crates/optimism/flashblocks/src/service.rs +++ scroll-reth/crates/optimism/flashblocks/src/service.rs @@ -1,7 +1,8 @@ use crate::{ sequence::FlashBlockPendingSequence, worker::{BuildArgs, FlashBlockBuilder}, - ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, PendingFlashBlock, + ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, + PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; @@ -21,7 +22,10 @@ pin::Pin, task::{ready, Context, Poll}, time::Instant, }; -use tokio::{pin, sync::oneshot}; +use tokio::{ + pin, + sync::{oneshot, watch}, +}; use tracing::{debug, trace, warn};   pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9; @@ -48,11 +52,25 @@ /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again /// when fb received on top of the same block. Avoid redundant I/O across multiple /// executions within the same block. cached_state: Option<(B256, CachedReads)>, + /// Signals when a block build is in progress + in_progress_tx: watch::Sender<Option<FlashBlockBuildInfo>>, + /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, /// Enable state root calculation from flashblock with index [`FB_STATE_ROOT_FROM_INDEX`] compute_state_root: bool, }   +/// Information for a flashblock currently built +#[derive(Debug, Clone, Copy)] +pub struct FlashBlockBuildInfo { + /// Parent block hash + pub parent_hash: B256, + /// Flashblock index within the current block's sequence + pub index: u64, + /// Block number of the flashblock being built. + pub block_number: u64, +} + impl<N, S, EvmConfig, Provider> FlashBlockService<N, S, EvmConfig, Provider> where N: NodePrimitives, @@ -73,6 +91,7 @@ + 'static, { /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self { + let (in_progress_tx, _) = watch::channel(None); Self { rx, current: None, @@ -83,6 +102,7 @@ rebuild: false, spawner, job: None, cached_state: None, + in_progress_tx, metrics: FlashBlockServiceMetrics::default(), compute_state_root: false, } @@ -97,6 +117,11 @@ /// Returns a subscriber to the flashblock sequence. pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { self.blocks.subscribe_block_sequence() + } + + /// Returns a receiver that signals when a flashblock is being built. + pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_tx.subscribe() }   /// Drives the services and sends new blocks to the receiver @@ -218,6 +243,8 @@ None => None, }; // reset job this.job.take(); + // No build in progress + let _ = this.in_progress_tx.send(None);   if let Some((now, result)) = result { match result { @@ -293,6 +320,13 @@ // try to build a block on top of latest if let Some(args) = this.build_args() { let now = Instant::now();   + let fb_info = FlashBlockBuildInfo { + parent_hash: args.base.parent_hash, + index: args.last_flashblock_index, + block_number: args.base.block_number, + }; + // Signal that a flashblock build has started with build metadata + let _ = this.in_progress_tx.send(Some(fb_info)); let (tx, rx) = oneshot::channel(); let builder = this.builder.clone();
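The service now publishes a `tokio::sync::watch` channel that carries `Some(FlashBlockBuildInfo)` while a build is in flight and `None` once it completes; subscribers obtain it via `subscribe_in_progress`. The RPC layer (see the `eth/mod.rs` hunk further down) uses this to wait briefly for a fresher flashblock instead of always serving the cached one. A minimal sketch of that handshake, assuming only tokio:

```rust
use std::time::Duration;
use tokio::{sync::watch, time};

/// Trimmed-down stand-in for the `FlashBlockBuildInfo` introduced above.
#[derive(Debug, Clone, Copy)]
struct FlashBlockBuildInfo {
    index: u64,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel::<Option<FlashBlockBuildInfo>>(None);

    // Builder side: mark a build as in progress, then clear the flag once it lands.
    tokio::spawn(async move {
        tx.send(Some(FlashBlockBuildInfo { index: 3 })).ok();
        time::sleep(Duration::from_millis(10)).await;
        tx.send(None).ok();
    });

    // Give the builder a moment to flag the in-flight build.
    time::sleep(Duration::from_millis(5)).await;

    // Reader side (RPC): if a build is in flight, wait a bounded amount of time for the
    // next update rather than immediately serving a stale pending block.
    if rx.borrow().is_some() {
        let _ = time::timeout(Duration::from_millis(50), rx.changed()).await;
    }
    println!("in-flight build after wait: {:?}", *rx.borrow());
}
```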
diff --git reth/crates/optimism/node/src/node.rs scroll-reth/crates/optimism/node/src/node.rs index ebad4e6699926626ddd257e90095ec5a8b844474..ca4919fe63d0d101f4793ddc08e6447202cbdd52 100644 --- reth/crates/optimism/node/src/node.rs +++ scroll-reth/crates/optimism/node/src/node.rs @@ -1164,7 +1164,7 @@ pool: Pool, ) -> eyre::Result<Self::Network> { let network_config = self.network_config(ctx)?; let network = NetworkManager::builder(network_config).await?; - let handle = ctx.start_network(network, pool); + let handle = ctx.start_network(network, pool, None); info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized");   Ok(handle)
diff --git reth/crates/optimism/node/tests/it/builder.rs scroll-reth/crates/optimism/node/tests/it/builder.rs index e0437a5f655d3b0a6cffbcf5aaff92f351f01909..b495fdb47ce650bbb5524a40727d556845b68e5f 100644 --- reth/crates/optimism/node/tests/it/builder.rs +++ scroll-reth/crates/optimism/node/tests/it/builder.rs @@ -19,7 +19,7 @@ use reth_optimism_node::{args::RollupArgs, OpEvmConfig, OpExecutorBuilder, OpNode}; use reth_optimism_primitives::OpPrimitives; use reth_provider::providers::BlockchainProvider; use revm::{ - context::{Cfg, ContextTr, TxEnv}, + context::{BlockEnv, Cfg, ContextTr, TxEnv}, context_interface::result::EVMError, inspector::NoOpInspector, interpreter::interpreter::EthInterpreter, @@ -94,6 +94,7 @@ type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError, OpTransactionError>; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap;   fn create_evm<DB: Database>(
diff --git reth/crates/optimism/payload/src/builder.rs scroll-reth/crates/optimism/payload/src/builder.rs index 1d73464e178d8dacae2d5021c3ae26b5f7f518a7..ecc7a400349a498667f20fc2d0d08b6d50bc47e2 100644 --- reth/crates/optimism/payload/src/builder.rs +++ scroll-reth/crates/optimism/payload/src/builder.rs @@ -567,9 +567,9 @@ &self.config.attributes }   /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -659,10 +659,10 @@ mut best_txs: impl PayloadTransactions< Transaction: PoolTransaction<Consensus = TxTy<Evm::Primitives>> + OpPooledTx, >, ) -> Result<Option<()>, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; + let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); - let base_fee = builder.evm_mut().block().basefee; + let base_fee = builder.evm_mut().block().basefee();   while let Some(tx) = best_txs.next(()) { let interop = tx.interop_deadline();
diff --git reth/crates/optimism/rpc/src/eth/call.rs scroll-reth/crates/optimism/rpc/src/eth/call.rs index b7ce75c51b2625951fd93e496a4fcc1404440cbc..4e853984ac9248cecdd4f8d7d36cb10672f63c36 100644 --- reth/crates/optimism/rpc/src/eth/call.rs +++ scroll-reth/crates/optimism/rpc/src/eth/call.rs @@ -1,5 +1,4 @@ use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, FromEvmError, RpcConvert, @@ -9,12 +8,7 @@ impl<N, Rpc> EthCall for OpEthApi<N, Rpc> where N: RpcNodeCore, OpEthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError, Evm = N::Evm>, { }   @@ -22,12 +16,7 @@ impl<N, Rpc> EstimateCall for OpEthApi<N, Rpc> where N: RpcNodeCore, OpEthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError, Evm = N::Evm>, { }   @@ -35,12 +24,7 @@ impl<N, Rpc> Call for OpEthApi<N, Rpc> where N: RpcNodeCore, OpEthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError, Evm = N::Evm>, { #[inline] fn call_gas_limit(&self) -> u64 {
diff --git reth/crates/optimism/rpc/src/eth/mod.rs scroll-reth/crates/optimism/rpc/src/eth/mod.rs index fdd06d224bcdafb3f055811676a78a0623b11d81..e10c5152473e243656b6c7c42e659a868988cad1 100644 --- reth/crates/optimism/rpc/src/eth/mod.rs +++ scroll-reth/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,7 @@ eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper}, OpEthApiError, SequencerClient, }; use alloy_consensus::BlockHeader; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; @@ -23,8 +23,8 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockCompleteSequenceRx, FlashBlockService, PendingBlockRx, - WsFlashBlockStream, + ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, + InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -43,10 +43,18 @@ use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc, time::Instant}; -use tokio::sync::watch; +use std::{ + fmt::{self, Formatter}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; +use tokio::{sync::watch, time}; use tracing::info;   +/// Maximum duration to wait for a fresh flashblock when one is being built. +const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); + /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend<N, Rpc> = EthApiInner<N, Rpc>;   @@ -79,6 +87,7 @@ sequencer_client: Option<SequencerClient>, min_suggested_priority_fee: U256, pending_block_rx: Option<PendingBlockRx<N::Primitives>>, flashblock_rx: Option<FlashBlockCompleteSequenceRx>, + in_progress_rx: Option<InProgressFlashBlockRx>, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, @@ -86,6 +95,7 @@ sequencer_client, min_suggested_priority_fee, pending_block_rx, flashblock_rx, + in_progress_rx, }); Self { inner } } @@ -109,15 +119,57 @@ pub fn flashblock_rx(&self) -> Option<FlashBlockCompleteSequenceRx> { self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) }   + /// Returns information about the flashblock currently being built, if any. + fn flashblock_build_info(&self) -> Option<FlashBlockBuildInfo> { + self.inner.in_progress_rx.as_ref().and_then(|rx| *rx.borrow()) + } + + /// Extracts pending block if it matches the expected parent hash. + fn extract_matching_block( + &self, + block: Option<&PendingFlashBlock<N::Primitives>>, + parent_hash: B256, + ) -> Option<PendingBlock<N::Primitives>> { + block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. pub const fn builder() -> OpEthApiBuilder<Rpc> { OpEthApiBuilder::new() }   + /// Awaits a fresh flashblock if one is being built, otherwise returns current. 
+ async fn flashblock( + &self, + parent_hash: B256, + ) -> eyre::Result<Option<PendingBlock<N::Primitives>>> { + let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; + + // Check if a flashblock is being built + if let Some(build_info) = self.flashblock_build_info() { + let current_index = rx.borrow().as_ref().map(|b| b.last_flashblock_index); + + // Check if this is the first flashblock or the next consecutive index + let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1); + + // Wait only for relevant flashblocks: matching parent and next in sequence + if build_info.parent_hash == parent_hash && is_next_index { + let mut rx_clone = rx.clone(); + // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive + let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await; + } + } + + // Fall back to current block + Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash)) + } + /// Returns a [`PendingBlock`] that is built out of flashblocks. /// /// If flashblocks receiver is not set, then it always returns `None`. - pub fn pending_flashblock(&self) -> eyre::Result<Option<PendingBlock<N::Primitives>>> + /// + /// It may wait up to 50ms for a fresh flashblock if one is currently being built. + pub async fn pending_flashblock(&self) -> eyre::Result<Option<PendingBlock<N::Primitives>>> where OpEthApiError: FromEvmError<N::Evm>, Rpc: RpcConvert<Primitives = N::Primitives>, @@ -128,21 +180,7 @@ PendingBlockEnvOrigin::ActualPending(..) => return Ok(None), PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent, };   - let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; - let pending_block = rx.borrow(); - let Some(pending_block) = pending_block.as_ref() else { return Ok(None) }; - - let now = Instant::now(); - - // Is the pending block not expired and latest is its parent? - if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && - parent.hash() == pending_block.block().parent_hash() && - now <= pending_block.expires_at - { - return Ok(Some(pending_block.pending.clone())); - } - - Ok(None) + self.flashblock(parent.hash()).await } }   @@ -252,8 +290,12 @@ self.inner.eth_api.fee_history_cache() }   async fn suggested_priority_fee(&self) -> Result<U256, Self::Error> { - let min_tip = U256::from(self.inner.min_suggested_priority_fee); - self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into) + self.inner + .eth_api + .gas_oracle() + .op_suggest_tip_cap(self.inner.min_suggested_priority_fee) + .await + .map_err(Into::into) } }   @@ -330,6 +372,8 @@ /// Flashblocks receiver. /// /// If set, then it provides sequences of flashblock built. 
flashblock_rx: Option<FlashBlockCompleteSequenceRx>, + /// Receiver that signals when a flashblock is being built + in_progress_rx: Option<InProgressFlashBlockRx>, }   impl<N: RpcNodeCore, Rpc: RpcConvert> fmt::Debug for OpEthApiInner<N, Rpc> { @@ -465,24 +509,28 @@ } else { None };   - let rxs = if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_block_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); - let service = FlashBlockService::new( - stream, - ctx.components.evm_config().clone(), - ctx.components.provider().clone(), - ctx.components.task_executor().clone(), - ); - let flashblock_rx = service.subscribe_block_sequence(); - ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - Some((pending_block_rx, flashblock_rx)) - } else { - None - }; + let (pending_block_rx, flashblock_rx, in_progress_rx) = + if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service");   - let (pending_block_rx, flashblock_rx) = rxs.unzip(); + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + ); + + let flashblock_rx = service.subscribe_block_sequence(); + let in_progress_rx = service.subscribe_in_progress(); + + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); + + (Some(pending_rx), Some(flashblock_rx), Some(in_progress_rx)) + } else { + (None, None, None) + };   let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner();   @@ -492,6 +540,7 @@ sequencer_client, U256::from(min_suggested_priority_fee), pending_block_rx, flashblock_rx, + in_progress_rx, )) } }
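`pending_flashblock` is now async: when the in-progress channel reports a build for the same parent hash and the next index in the sequence, it waits up to `MAX_FLASHBLOCK_WAIT_DURATION` (50ms) for the watch channel to change before reading the pending block. The index predicate relies on `Option::is_none_or`; a small sketch of just that check:

```rust
/// Mirrors the "first flashblock or direct successor" check in `flashblock` above.
/// `current_index` is the index of the last flashblock already applied, if any.
fn is_next_index(current_index: Option<u64>, building_index: u64) -> bool {
    current_index.is_none_or(|idx| building_index == idx + 1)
}

fn main() {
    assert!(is_next_index(None, 0)); // nothing applied yet: worth waiting for
    assert!(is_next_index(Some(2), 3)); // direct successor: wait for it
    assert!(!is_next_index(Some(2), 5)); // gap: serve what we already have
}
```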
diff --git reth/crates/optimism/rpc/src/eth/pending_block.rs scroll-reth/crates/optimism/rpc/src/eth/pending_block.rs index 8857b89b0210dae8efedd661d4e69bc196ffbf89..151668f40397a317646a425fb1aaffec4db794c7 100644 --- reth/crates/optimism/rpc/src/eth/pending_block.rs +++ scroll-reth/crates/optimism/rpc/src/eth/pending_block.rs @@ -42,7 +42,7 @@ /// Returns the locally built pending block async fn local_pending_block( &self, ) -> Result<Option<BlockAndReceipts<Self::Primitives>>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock() { + if let Ok(Some(pending)) = self.pending_flashblock().await { return Ok(Some(pending.into_block_and_receipts())); }   @@ -70,7 +70,7 @@ async fn local_pending_state(&self) -> Result<Option<StateProviderBox>, Self::Error> where Self: SpawnBlocking, { - let Ok(Some(pending_block)) = self.pending_flashblock() else { + let Ok(Some(pending_block)) = self.pending_flashblock().await else { return Ok(None); };
diff --git reth/crates/optimism/rpc/src/eth/receipt.rs scroll-reth/crates/optimism/rpc/src/eth/receipt.rs index 97fe3a0b5b7dedae8c331f239cc0381bc9909387..775e79d5aff0109f10461dfc1dbc0786a29b2fec 100644 --- reth/crates/optimism/rpc/src/eth/receipt.rs +++ scroll-reth/crates/optimism/rpc/src/eth/receipt.rs @@ -458,10 +458,11 @@ let tx_1 = OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap();   - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::from(2)); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::from(2)), + ..Default::default() + };   let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) @@ -481,10 +482,11 @@ let tx_1 = OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap();   - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::ZERO); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::ZERO), + ..Default::default() + };   let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info)
diff --git reth/crates/optimism/rpc/src/eth/transaction.rs scroll-reth/crates/optimism/rpc/src/eth/transaction.rs index fb98569db1090d7bbf398f69eefe020e06329b95..aa7e8ea60bdce18b7b453b1f9613be46c1e0a449 100644 --- reth/crates/optimism/rpc/src/eth/transaction.rs +++ scroll-reth/crates/optimism/rpc/src/eth/transaction.rs @@ -127,7 +127,7 @@ futures::future::pending().await } } => { // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if block_and_receipts.block.body().contains_transaction(&hash) && let Some(receipt) = this.transaction_receipt(hash).await? { @@ -168,7 +168,7 @@ let tx_receipt = this.load_transaction_and_receipt(hash).await?;   if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if let Some((tx, receipt)) = block_and_receipts.find_transaction_and_receipt_by_hash(hash)
diff --git reth/crates/optimism/rpc/src/lib.rs scroll-reth/crates/optimism/rpc/src/lib.rs index 1c9b5d1c39e30cb1b6e0cca428e804ff1952669e..10f8ad5dccd0814a032417ae4231a3d1abe62caf 100644 --- reth/crates/optimism/rpc/src/lib.rs +++ scroll-reth/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,7 @@ pub mod engine; pub mod error; pub mod eth; pub mod historical; +pub mod metrics; pub mod miner; pub mod sequencer; pub mod witness; @@ -21,4 +22,5 @@ pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use metrics::SequencerMetrics; pub use sequencer::SequencerClient;
diff --git reth/crates/optimism/rpc/src/metrics.rs scroll-reth/crates/optimism/rpc/src/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..5aa5e3eff3d6eae85143f30b9af6f02d404bbefb --- /dev/null +++ scroll-reth/crates/optimism/rpc/src/metrics.rs @@ -0,0 +1,21 @@ +//! RPC metrics unique for OP-stack. + +use core::time::Duration; +use metrics::Histogram; +use reth_metrics::Metrics; + +/// Optimism sequencer metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.sequencer")] +pub struct SequencerMetrics { + /// How long it takes to forward a transaction to the sequencer + pub(crate) sequencer_forward_latency: Histogram, +} + +impl SequencerMetrics { + /// Records the duration it took to forward a transaction + #[inline] + pub fn record_forward_latency(&self, duration: Duration) { + self.sequencer_forward_latency.record(duration.as_secs_f64()); + } +}
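Sequencer forwarding latency gets its own histogram under the `optimism_rpc.sequencer` scope (and is removed from the txpool's supervisor metrics further down). A hedged sketch of how such a metric is fed, with a plain vector standing in for the real `metrics::Histogram`:

```rust
use std::time::{Duration, Instant};

/// Stand-in for `SequencerMetrics`; the real type records into a `metrics::Histogram`
/// registered under the `optimism_rpc.sequencer` scope.
#[derive(Default)]
struct SequencerMetrics {
    forward_latency_secs: Vec<f64>,
}

impl SequencerMetrics {
    /// Records how long forwarding a transaction to the sequencer took, in seconds.
    fn record_forward_latency(&mut self, duration: Duration) {
        self.forward_latency_secs.push(duration.as_secs_f64());
    }
}

fn main() {
    let mut metrics = SequencerMetrics::default();

    let start = Instant::now();
    // ... forward the raw transaction to the sequencer endpoint here ...
    metrics.record_forward_latency(start.elapsed());

    assert_eq!(metrics.forward_latency_secs.len(), 1);
}
```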
diff --git reth/crates/optimism/rpc/src/sequencer.rs scroll-reth/crates/optimism/rpc/src/sequencer.rs index c3b543638bb631afbe389f67876f487031ecdc01..8fc8c1b389d5eb2315b8e9fadfe6baba58f1e938 100644 --- reth/crates/optimism/rpc/src/sequencer.rs +++ scroll-reth/crates/optimism/rpc/src/sequencer.rs @@ -1,12 +1,11 @@ //! Helpers for optimism specific RPC implementations.   -use crate::SequencerClientError; +use crate::{SequencerClientError, SequencerMetrics}; use alloy_json_rpc::{RpcRecv, RpcSend}; use alloy_primitives::{hex, B256}; use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; use alloy_rpc_types_eth::erc4337::TransactionConditional; use alloy_transport_http::Http; -use reth_optimism_txpool::supervisor::metrics::SequencerMetrics; use std::{str::FromStr, sync::Arc, time::Instant}; use thiserror::Error; use tracing::warn; @@ -121,6 +120,7 @@ Ok(Self { inner: Arc::new(inner) }) }   /// Returns the network of the client + #[allow(clippy::missing_const_for_fn)] pub fn endpoint(&self) -> &str { &self.inner.sequencer_endpoint }
diff --git reth/crates/optimism/txpool/src/supervisor/metrics.rs scroll-reth/crates/optimism/txpool/src/supervisor/metrics.rs index 23eec8430257169247a076cc7a2ac50e5bc6aa8d..cb51a52bfc56b53c53a1f3519d18894176552b67 100644 --- reth/crates/optimism/txpool/src/supervisor/metrics.rs +++ scroll-reth/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,4 +1,4 @@ -//! Optimism supervisor and sequencer metrics +//! Optimism supervisor metrics   use crate::supervisor::InteropTxValidatorError; use op_alloy_rpc_types::SuperchainDAError; @@ -70,19 +70,3 @@ } } } } - -/// Optimism sequencer metrics -#[derive(Metrics, Clone)] -#[metrics(scope = "optimism_transaction_pool.sequencer")] -pub struct SequencerMetrics { - /// How long it takes to forward a transaction to the sequencer - pub(crate) sequencer_forward_latency: Histogram, -} - -impl SequencerMetrics { - /// Records the duration it took to forward a transaction - #[inline] - pub fn record_forward_latency(&self, duration: Duration) { - self.sequencer_forward_latency.record(duration.as_secs_f64()); - } -}
diff --git reth/crates/primitives-traits/Cargo.toml scroll-reth/crates/primitives-traits/Cargo.toml index 58d52bddb03dde433bb461a0e3138d7502a2b9a9..7537df42772021f79a195dda7764bfd653a8ad61 100644 --- reth/crates/primitives-traits/Cargo.toml +++ scroll-reth/crates/primitives-traits/Cargo.toml @@ -22,9 +22,14 @@ alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["k256"] } alloy-rlp.workspace = true alloy-trie.workspace = true -revm-primitives.workspace = true + +# revm +revm-primitives = { workspace = true, default-features = false } revm-bytecode.workspace = true revm-state.workspace = true + +# scroll +scroll-alloy-consensus = { workspace = true, optional = true, features = ["k256"] }   # op op-alloy-consensus = { workspace = true, optional = true, features = ["k256"] } @@ -93,6 +98,7 @@ "reth-chainspec/std", "revm-bytecode/std", "revm-state/std", "alloy-rpc-types-eth?/std", + "scroll-alloy-consensus?/std", ] secp256k1 = ["alloy-consensus/secp256k1"] test-utils = [ @@ -116,6 +122,7 @@ "op-alloy-consensus?/arbitrary", "alloy-trie/arbitrary", "reth-chainspec/arbitrary", "alloy-rpc-types-eth?/arbitrary", + "scroll-alloy-consensus?/arbitrary", ] serde-bincode-compat = [ "serde", @@ -126,6 +133,7 @@ "op-alloy-consensus?/serde", "op-alloy-consensus?/serde-bincode-compat", "alloy-genesis/serde-bincode-compat", "alloy-rpc-types-eth?/serde-bincode-compat", + "scroll-alloy-consensus?/serde-bincode-compat", ] serde = [ "dep:serde", @@ -144,16 +152,19 @@ "revm-bytecode/serde", "revm-state/serde", "rand_08/serde", "alloy-rpc-types-eth?/serde", + "scroll-alloy-consensus?/serde", ] reth-codec = [ "dep:reth-codecs", "dep:modular-bitfield", "dep:byteorder", + "scroll-alloy-consensus?/reth-codec", ] op = [ "dep:op-alloy-consensus", "reth-codecs?/op", ] +scroll-alloy-traits = ["scroll-alloy-consensus"] rayon = [ "dep:rayon", ]
diff --git reth/crates/primitives-traits/src/block/header.rs scroll-reth/crates/primitives-traits/src/block/header.rs index c4df8ecf542ffecce04b1ff19bcdd5df0af0b94c..38a218d11d1cd4ebe1d208591ab65248e6b58039 100644 --- reth/crates/primitives-traits/src/block/header.rs +++ scroll-reth/crates/primitives-traits/src/block/header.rs @@ -1,7 +1,7 @@ //! Block header data primitive.   use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat}; -use alloy_primitives::Sealable; +use alloy_primitives::{Bytes, Sealable}; use core::{fmt, hash::Hash};   /// Re-exported alias @@ -33,7 +33,20 @@ + MaybeSerde + MaybeSerdeBincodeCompat + AsRef<Self> + 'static + + BlockHeaderMut { }   impl BlockHeader for alloy_consensus::Header {} + +/// Returns a mutable reference to fields of the header. +pub trait BlockHeaderMut { + /// Mutable reference to the extra data. + fn extra_data_mut(&mut self) -> &mut Bytes; +} + +impl BlockHeaderMut for alloy_consensus::Header { + fn extra_data_mut(&mut self) -> &mut Bytes { + &mut self.extra_data + } +}
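`BlockHeader` picks up a new `BlockHeaderMut` supertrait, presumably so generic builder code can write fork-specific bytes into `extra_data` without naming a concrete header type. A sketch of what a custom header would implement, with `Bytes` simplified to `Vec<u8>` so the example stands alone:

```rust
/// Stand-in for `alloy_primitives::Bytes` so the sketch is self-contained.
type Bytes = Vec<u8>;

/// Mirror of the `BlockHeaderMut` trait added above.
trait BlockHeaderMut {
    /// Mutable reference to the extra data.
    fn extra_data_mut(&mut self) -> &mut Bytes;
}

/// A hypothetical fork-specific header type.
#[derive(Default)]
struct MyHeader {
    extra_data: Bytes,
}

impl BlockHeaderMut for MyHeader {
    fn extra_data_mut(&mut self) -> &mut Bytes {
        &mut self.extra_data
    }
}

fn main() {
    let mut header = MyHeader::default();
    // Generic code can now tweak `extra_data` through the trait alone.
    header.extra_data_mut().extend_from_slice(b"scroll");
    assert_eq!(header.extra_data, b"scroll".to_vec());
}
```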
diff --git reth/crates/primitives-traits/src/block/recovered.rs scroll-reth/crates/primitives-traits/src/block/recovered.rs index d6bba9d11270cf083acea4b7be0600195db40170..1e97efb5dc9dcc3685d085f2dcf23843ad7d41f0 100644 --- reth/crates/primitives-traits/src/block/recovered.rs +++ scroll-reth/crates/primitives-traits/src/block/recovered.rs @@ -78,6 +78,7 @@ Self { block: SealedBlock::new_unhashed(block), senders } }   /// Returns the recovered senders. + #[allow(clippy::missing_const_for_fn)] pub fn senders(&self) -> &[Address] { &self.senders }
diff --git reth/crates/primitives-traits/src/extended.rs scroll-reth/crates/primitives-traits/src/extended.rs index 4cba4b7d52d48b580100513a6cd98f15fcfcc6ab..da2bbc533aa2baacba3a768a7571db205a21d630 100644 --- reth/crates/primitives-traits/src/extended.rs +++ scroll-reth/crates/primitives-traits/src/extended.rs @@ -142,8 +142,8 @@ }   impl<B, T> SignerRecoverable for Extended<B, T> where - B: SignedTransaction + IsTyped2718, - T: SignedTransaction, + B: SignerRecoverable, + T: SignerRecoverable, { fn recover_signer(&self) -> Result<Address, RecoveryError> { delegate!(self => tx.recover_signer())
diff --git reth/crates/primitives-traits/src/serde_bincode_compat.rs scroll-reth/crates/primitives-traits/src/serde_bincode_compat.rs index 217ad5ff3320b447f0c246d67123a7c78ae8d930..fcfb7f681e7ea1cfddd33848d1a013bb89a77561 100644 --- reth/crates/primitives-traits/src/serde_bincode_compat.rs +++ scroll-reth/crates/primitives-traits/src/serde_bincode_compat.rs @@ -345,4 +345,17 @@ fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { repr.into() } } + + #[cfg(feature = "scroll-alloy-traits")] + impl SerdeBincodeCompat for scroll_alloy_consensus::ScrollTxEnvelope { + type BincodeRepr<'a> = scroll_alloy_consensus::serde_bincode_compat::ScrollTxEnvelope<'a>; + + fn as_repr(&self) -> Self::BincodeRepr<'_> { + self.into() + } + + fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { + repr.into() + } + } }
diff --git reth/crates/primitives-traits/src/size.rs scroll-reth/crates/primitives-traits/src/size.rs index 82c8b5d9c4339216eb641af38ef5fe5292de3614..66c388ee4605ed3a5ebbdd3ef128c35961151046 100644 --- reth/crates/primitives-traits/src/size.rs +++ scroll-reth/crates/primitives-traits/src/size.rs @@ -183,6 +183,49 @@ } } } } + +/// Implementations for scroll types. +#[cfg(feature = "scroll-alloy-traits")] +mod scroll { + use super::*; + use scroll_alloy_consensus::ScrollTxEnvelope; + + impl InMemorySize for scroll_alloy_consensus::ScrollTypedTransaction { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::L1Message(tx) => tx.size(), + } + } + } + + impl InMemorySize for scroll_alloy_consensus::ScrollPooledTransaction { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + } + } + } + + impl InMemorySize for ScrollTxEnvelope { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::L1Message(tx) => tx.size(), + } + } + } +} + #[cfg(test)] mod tests { use super::*;
diff --git reth/crates/primitives-traits/src/transaction/access_list.rs scroll-reth/crates/primitives-traits/src/transaction/access_list.rs index 06c033e36b0217a21696da2cfd6a1cf16005fd79..e4d5638f562cc6c25351ae5b722a623fe730355d 100644 --- reth/crates/primitives-traits/src/transaction/access_list.rs +++ scroll-reth/crates/primitives-traits/src/transaction/access_list.rs @@ -8,22 +8,11 @@ use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use proptest::proptest; use proptest_arbitrary_interop::arb; use reth_codecs::{add_arbitrary_tests, Compact}; - use serde::{Deserialize, Serialize};   /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// `AccessList` type natively #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodableWrapper, - RlpEncodableWrapper, - Serialize, - Deserialize, - Compact, + Clone, Debug, PartialEq, Eq, Default, RlpDecodableWrapper, RlpEncodableWrapper, Compact, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] @@ -36,22 +25,9 @@ } }   // This - #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodable, - RlpEncodable, - Serialize, - Deserialize, - Compact, - )] + #[derive(Clone, Debug, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] - #[serde(rename_all = "camelCase")] struct RethAccessListItem { /// Account address that would be loaded at the start of execution address: Address,
diff --git reth/crates/primitives-traits/src/transaction/signed.rs scroll-reth/crates/primitives-traits/src/transaction/signed.rs index 08a6758d8d4ee565d1a25820ac5be8dd05e195ab..f6218e9fd4ba0094494d23715917b0f0c658c617 100644 --- reth/crates/primitives-traits/src/transaction/signed.rs +++ scroll-reth/crates/primitives-traits/src/transaction/signed.rs @@ -145,3 +145,13 @@ impl SignedTransaction for OpPooledTransaction {}   impl SignedTransaction for OpTxEnvelope {} } + +#[cfg(feature = "scroll-alloy-traits")] +mod scroll { + use super::*; + use scroll_alloy_consensus::{ScrollPooledTransaction, ScrollTxEnvelope}; + + impl SignedTransaction for ScrollPooledTransaction {} + + impl SignedTransaction for ScrollTxEnvelope {} +}
diff --git reth/crates/primitives/Cargo.toml scroll-reth/crates/primitives/Cargo.toml index 665dcab9a88006249be2639c20128f15e7fd302a..1717cc6ec3f655900823a1f94d6de5e5951069dc 100644 --- reth/crates/primitives/Cargo.toml +++ scroll-reth/crates/primitives/Cargo.toml @@ -14,7 +14,7 @@ [dependencies] # reth reth-ethereum-primitives = { workspace = true, features = ["serde"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true
diff --git reth/crates/stages/api/src/pipeline/mod.rs scroll-reth/crates/stages/api/src/pipeline/mod.rs index 0a9aaef73de10b4b11b61e3d3a97fac046c496a0..2446219ea3d649b1a2a9ccb02a1cb5da53033693 100644 --- reth/crates/stages/api/src/pipeline/mod.rs +++ scroll-reth/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, BlockHashReader, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, DatabaseProviderFactory, ProviderFactory, - PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, + PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -31,7 +31,7 @@ StageError, StageExt, UnwindInput, }; pub use builder::*; use progress::*; -use reth_errors::RethResult; +use reth_errors::{ProviderResult, RethResult}; pub use set::*;   /// A container for a queued stage. @@ -101,12 +101,6 @@ { PipelineBuilder::default() }   - /// Return the minimum block number achieved by - /// any stage during the execution of the pipeline. - pub const fn minimum_block_number(&self) -> Option<u64> { - self.progress.minimum_block_number - } - /// Set tip for reverse sync. #[track_caller] pub fn set_tip(&self, tip: B256) { @@ -127,9 +121,7 @@ idx: usize, ) -> &mut dyn Stage<<ProviderFactory<N> as DatabaseProviderFactory>::ProviderRW> { &mut self.stages[idx] } -}   -impl<N: ProviderNodeTypes> Pipeline<N> { /// Registers progress metrics for each registered stage pub fn register_metrics(&mut self) -> Result<(), PipelineError> { let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) }; @@ -285,6 +277,81 @@ .delete_limit(usize::MAX) .build_with_provider_factory(self.provider_factory.clone());   pruner.run(prune_tip)?; + } + + Ok(()) + } + + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less + /// than the checkpoint of the first stage). + /// + /// This will return the pipeline target if: + /// * the pipeline was interrupted during its previous run + /// * a new stage was added + /// * stage data was dropped manually through `reth stage drop ...` + /// + /// # Returns + /// + /// A target block hash if the pipeline is inconsistent, otherwise `None`. + pub fn initial_backfill_target(&self) -> ProviderResult<Option<B256>> { + let provider = self.provider_factory.provider()?; + + // If no target was provided, check if the stages are congruent - check if the + // checkpoint of the last stage matches the checkpoint of the first. + let first_stage_checkpoint = provider + .get_stage_checkpoint(self.stages.first().unwrap().id())? + .unwrap_or_default() + .block_number; + + // Skip the first stage as we've already retrieved it and comparing all other checkpoints + // against it. + for stage in self.stages.iter().skip(1) { + let stage_id = stage.id(); + + let stage_checkpoint = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + + // If the checkpoint of any stage is less than the checkpoint of the first stage, + // retrieve and return the block hash of the latest header and use it as the target. 
+ if stage_checkpoint < first_stage_checkpoint { + debug!( + target: "consensus::engine", + first_stage_checkpoint, + inconsistent_stage_id = %stage_id, + inconsistent_stage_checkpoint = stage_checkpoint, + "Pipeline sync progress is inconsistent" + ); + return provider.block_hash(first_stage_checkpoint); + } + } + + Ok(None) + } + + /// Checks for consistency between database and static files. If it fails, it unwinds to + /// the first block that's consistent between database and static files. + pub async fn ensure_static_files_consistency(&mut self) -> Result<(), PipelineError> { + let maybe_unwind_target = self + .provider_factory + .static_file_provider() + .check_consistency(&self.provider_factory.provider()?)?; + + self.move_to_static_files()?; + + if let Some(unwind_target) = maybe_unwind_target { + // Highly unlikely to happen, and given its destructive nature, it's better to panic + // instead. + assert_ne!( + unwind_target, + 0, + "A static file <> database inconsistency was found that would trigger an unwind to block 0" + ); + + info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + + self.unwind(unwind_target, None).inspect_err(|err| { + error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") + })?; }   Ok(())
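The pipeline now exposes two consistency helpers: `initial_backfill_target` compares every stage checkpoint against the first stage's, and `ensure_static_files_consistency` heals database/static-file drift before sync starts. A minimal sketch of how a launcher might call them, assuming `pipeline` is an already built `Pipeline<N>` and the actual backfill driver lives elsewhere in the fork:

```rust
use alloy_primitives::B256;
use reth_provider::providers::ProviderNodeTypes;
use reth_stages_api::{Pipeline, PipelineError};

/// Minimal sketch (assumed caller context): heal storage first, then ask
/// whether the stage checkpoints require an initial backfill.
async fn prepare_pipeline<N: ProviderNodeTypes>(
    pipeline: &mut Pipeline<N>,
) -> Result<Option<B256>, PipelineError> {
    // Unwinds to the first consistent block if the database and static files
    // disagree (panics only on the destructive unwind-to-genesis case).
    pipeline.ensure_static_files_consistency().await?;

    // Some(hash) means at least one stage lags the first stage's checkpoint;
    // the hash of that checkpoint block is the suggested backfill target.
    Ok(pipeline.initial_backfill_target()?)
}
```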
diff --git reth/crates/stages/stages/benches/criterion.rs scroll-reth/crates/stages/stages/benches/criterion.rs index 655b990f254899427633d6094321f06066733d04..1c03dc9257c27e7cae81b6d27aff4cd2b8cb20d1 100644 --- reth/crates/stages/stages/benches/criterion.rs +++ scroll-reth/crates/stages/stages/benches/criterion.rs @@ -5,6 +5,7 @@ use alloy_primitives::BlockNumber; use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; +use reth_ethereum_primitives::EthPrimitives; use reth_provider::{ test_utils::MockNodeTypesWithDB, DBProvider, DatabaseProvider, DatabaseProviderFactory, }; @@ -115,7 +116,10 @@ group.sample_size(10);   let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS);   - let stage = MerkleStage::Both { rebuild_threshold: u64::MAX, incremental_threshold: u64::MAX }; + let stage = MerkleStage::<EthPrimitives>::Both { + rebuild_threshold: u64::MAX, + incremental_threshold: u64::MAX, + }; measure_stage( runtime, &mut group, @@ -126,7 +130,8 @@ 1..=DEFAULT_NUM_BLOCKS, "Merkle-incremental".to_string(), );   - let stage = MerkleStage::Both { rebuild_threshold: 0, incremental_threshold: 0 }; + let stage = + MerkleStage::<EthPrimitives>::Both { rebuild_threshold: 0, incremental_threshold: 0 }; measure_stage( runtime, &mut group,
diff --git reth/crates/stages/stages/src/sets.rs scroll-reth/crates/stages/stages/src/sets.rs index 97c3a3116aa1784e6ccdf7e5e44c7eefeb2a347e..30dbd9281ddeb90026e66663c4a59ebfc8c733d7 100644 --- reth/crates/stages/stages/src/sets.rs +++ scroll-reth/crates/stages/stages/src/sets.rs @@ -320,18 +320,21 @@ where E: ConfigureEvm, ExecutionStages<E>: StageSet<Provider>, PruneSenderRecoveryStage: Stage<Provider>, - HashingStages: StageSet<Provider>, + HashingStages<E::Primitives>: StageSet<Provider>, HistoryIndexingStages: StageSet<Provider>, PruneStage: Stage<Provider>, { fn builder(self) -> StageSetBuilder<Provider> { - ExecutionStages::new(self.evm_config, self.consensus, self.stages_config.clone()) + ExecutionStages::new(self.evm_config, self.consensus.clone(), self.stages_config.clone()) .builder() // If sender recovery prune mode is set, add the prune sender recovery stage. .add_stage_opt(self.prune_modes.sender_recovery.map(|prune_mode| { PruneSenderRecoveryStage::new(prune_mode, self.stages_config.prune.commit_threshold) })) - .add_set(HashingStages { stages_config: self.stages_config.clone() }) + .add_set(HashingStages { + stages_config: self.stages_config.clone(), + consensus: self.consensus, + }) .add_set(HistoryIndexingStages { stages_config: self.stages_config.clone(), prune_modes: self.prune_modes.clone(), @@ -387,22 +390,25 @@ } }   /// A set containing all stages that hash account state. -#[derive(Debug, Default)] +#[derive(Debug)] #[non_exhaustive] -pub struct HashingStages { +pub struct HashingStages<P: NodePrimitives> { /// Configuration for each stage in the pipeline stages_config: StageConfig, + /// Consensus instance for validating blocks. + consensus: Arc<dyn FullConsensus<P, Error = ConsensusError>>, }   -impl<Provider> StageSet<Provider> for HashingStages +impl<Provider, P> StageSet<Provider> for HashingStages<P> where - MerkleStage: Stage<Provider>, + P: NodePrimitives, + MerkleStage<P>: Stage<Provider>, AccountHashingStage: Stage<Provider>, StorageHashingStage: Stage<Provider>, { fn builder(self) -> StageSetBuilder<Provider> { StageSetBuilder::default() - .add_stage(MerkleStage::default_unwind()) + .add_stage(MerkleStage::new_unwind(self.consensus.clone())) .add_stage(AccountHashingStage::new( self.stages_config.account_hashing, self.stages_config.etl.clone(), @@ -414,6 +420,7 @@ )) .add_stage(MerkleStage::new_execution( self.stages_config.merkle.rebuild_threshold, self.stages_config.merkle.incremental_threshold, + self.consensus, )) } }
diff --git reth/crates/stages/stages/src/stages/era.rs scroll-reth/crates/stages/stages/src/stages/era.rs index 436ee7696594a91d1a34d84297ef45a21aabf586..971bc11f8979998988dcdfc66e18573d6ac4044e 100644 --- reth/crates/stages/stages/src/stages/era.rs +++ scroll-reth/crates/stages/stages/src/stages/era.rs @@ -211,10 +211,16 @@ )?;   height } else { - input.target() + // It's possible for a pipeline sync to be executed with a None target, e.g. after a + // stage was manually dropped, and `reth node` is then called without a `--debug.tip`. + // + // In this case we don't want to simply default to zero, as that would overwrite the + // previously stored checkpoint block number. Instead we default to that previous + // checkpoint. + input.target.unwrap_or_else(|| input.checkpoint().block_number) };   - Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() }) + Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) }   fn unwind(
diff --git reth/crates/stages/stages/src/stages/merkle.rs scroll-reth/crates/stages/stages/src/stages/merkle.rs index 6cbed3ab20eb16c0693c63929e8a0cfba67c2235..e6aa166d2a26a35bc4c379764052f32f35031ffe 100644 --- reth/crates/stages/stages/src/stages/merkle.rs +++ scroll-reth/crates/stages/stages/src/stages/merkle.rs @@ -1,25 +1,28 @@ -use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader}; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader as _}; use reth_codecs::Compact; -use reth_consensus::ConsensusError; use reth_db_api::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_primitives_traits::{GotExpected, SealedHeader}; +use reth_primitives_traits::BlockHeader; use reth_provider::{ DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, StatsReader, TrieWriter, }; use reth_stages_api::{ - BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, - StageCheckpoint, StageError, StageId, StorageRootMerkleCheckpoint, UnwindInput, UnwindOutput, + EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, StageCheckpoint, + StageError, StageId, StorageRootMerkleCheckpoint, UnwindInput, UnwindOutput, }; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress, StoredSubNode}; use reth_trie_db::DatabaseStateRoot; -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; use tracing::*;   +use alloy_primitives::{BlockNumber, Sealable, B256}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator}; +use reth_primitives_traits::{NodePrimitives, SealedHeader}; +use reth_stages_api::BlockErrorKind; + // TODO: automate the process outlined below so the user can just send in a debugging package /// The error message that we include in invalid state root errors to tell users what information /// they should include in a bug report, since true state root errors can be impossible to debug @@ -70,7 +73,10 @@ /// - [`AccountHashingStage`][crate::stages::AccountHashingStage] /// - [`StorageHashingStage`][crate::stages::StorageHashingStage] /// - [`MerkleStage::Execution`] #[derive(Debug, Clone)] -pub enum MerkleStage { +pub enum MerkleStage<P> +where + P: NodePrimitives, +{ /// The execution portion of the merkle stage. Execution { // TODO: make struct for holding incremental settings, for code reuse between `Execution` @@ -82,9 +88,14 @@ /// The threshold (in number of blocks) to run the stage in incremental mode. The /// incremental mode will calculate the state root by calculating the new state root for /// some number of blocks, repeating until we reach the desired block number. incremental_threshold: u64, + /// Consensus. + consensus: Arc<dyn Consensus<P::Block, Error = ConsensusError>>, }, /// The unwind portion of the merkle stage. - Unwind, + Unwind { + /// Consensus. + consensus: Arc<dyn Consensus<P::Block, Error = ConsensusError>>, + }, /// Able to execute and unwind. Used for tests #[cfg(any(test, feature = "test-utils"))] Both { @@ -98,23 +109,44 @@ incremental_threshold: u64, }, }   -impl MerkleStage { - /// Stage default for the [`MerkleStage::Execution`]. - pub const fn default_execution() -> Self { +impl<P> MerkleStage<P> +where + P: NodePrimitives, +{ + /// Stage default for the [`MerkleStage::Execution`] with the provided consensus. 
+ pub const fn default_execution_with_consensus( + consensus: Arc<dyn Consensus<P::Block, Error = ConsensusError>>, + ) -> Self { Self::Execution { rebuild_threshold: MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, incremental_threshold: MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD, + consensus, } }   - /// Stage default for the [`MerkleStage::Unwind`]. - pub const fn default_unwind() -> Self { - Self::Unwind + /// Create new instance of [`MerkleStage::Execution`]. + pub const fn new_execution( + rebuild_threshold: u64, + incremental_threshold: u64, + consensus: Arc<dyn Consensus<P::Block, Error = ConsensusError>>, + ) -> Self { + Self::Execution { rebuild_threshold, incremental_threshold, consensus } }   - /// Create new instance of [`MerkleStage::Execution`]. - pub const fn new_execution(rebuild_threshold: u64, incremental_threshold: u64) -> Self { - Self::Execution { rebuild_threshold, incremental_threshold } + /// Create new instance of [`MerkleStage::Unwind`]. + pub const fn new_unwind( + consensus: Arc<dyn Consensus<P::Block, Error = ConsensusError>>, + ) -> Self { + Self::Unwind { consensus } + } + + /// Returns the consensus for the stage. + pub fn consensus(&self) -> Arc<dyn Consensus<P::Block, Error = ConsensusError>> { + match self { + Self::Execution { consensus, .. } | Self::Unwind { consensus } => consensus.clone(), + #[cfg(any(test, feature = "test-utils"))] + Self::Both { .. } => reth_consensus::noop::NoopConsensus::arc(), + } }   /// Gets the hashing progress @@ -152,7 +184,7 @@ Ok(provider.save_stage_checkpoint_progress(StageId::MerkleExecute, buf)?) } }   -impl<Provider> Stage<Provider> for MerkleStage +impl<Provider, P> Stage<Provider> for MerkleStage<P> where Provider: DBProvider<Tx: DbTxMut> + TrieWriter @@ -160,12 +192,13 @@ + StatsReader + HeaderProvider + StageCheckpointReader + StageCheckpointWriter, + P: NodePrimitives<BlockHeader = <Provider as HeaderProvider>::Header>, { /// Return the id of the stage fn id(&self) -> StageId { match self { Self::Execution { .. } => StageId::MerkleExecute, - Self::Unwind => StageId::MerkleUnwind, + Self::Unwind { .. } => StageId::MerkleUnwind, #[cfg(any(test, feature = "test-utils"))] Self::Both { .. } => StageId::Other("MerkleBoth"), } @@ -174,11 +207,11 @@ /// Execute the stage. fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> { let (threshold, incremental_threshold) = match self { - Self::Unwind => { + Self::Unwind { .. } => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); - return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))) + return Ok(ExecOutput::done(StageCheckpoint::new(input.target()))); } - Self::Execution { rebuild_threshold, incremental_threshold } => { + Self::Execution { rebuild_threshold, incremental_threshold, .. } => { (*rebuild_threshold, *incremental_threshold) } #[cfg(any(test, feature = "test-utils"))] @@ -344,7 +377,13 @@ // Reset the checkpoint self.save_execution_checkpoint(provider, None)?;   - validate_state_root(trie_root, SealedHeader::seal_slow(target_block), to_block)?; + // Ensure state root matches + validate_state_root( + self.consensus(), + trie_root, + SealedHeader::seal_slow(target_block), + to_block, + )?;   Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -397,7 +436,13 @@ let target = provider .header_by_number(input.unwind_to)? 
.ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?;   - validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?; + let consensus = self.consensus(); + validate_state_root( + consensus, + block_root, + SealedHeader::seal_slow(target), + input.unwind_to, + )?;   // Validation passed, apply unwind changes to the database. provider.write_trie_updates(&updates)?; @@ -421,21 +466,19 @@ /// Check that the computed state root matches the root in the expected header. #[inline] fn validate_state_root<H: BlockHeader + Sealable + Debug>( + consensus: Arc<dyn HeaderValidator<H>>, got: B256, expected: SealedHeader<H>, target_block: BlockNumber, ) -> Result<(), StageError> { - if got == expected.state_root() { - Ok(()) - } else { + consensus.validate_state_root(&*expected, got).inspect_err(|_|{ error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); - Err(StageError::Block { - error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( - GotExpected { got, expected: expected.state_root() }.into(), - )), + }).map_err(|err|{ + StageError::Block { + error: BlockErrorKind::Validation(err), block: Box::new(expected.block_with_parent()), - }) - } + } + }) }   #[cfg(test)] @@ -445,10 +488,11 @@ use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, TestRunnerError, TestStageDB, UnwindStageTestRunner, }; - use alloy_primitives::{keccak256, U256}; + use alloy_primitives::{keccak256, B256, U256}; use assert_matches::assert_matches; use reth_db_api::cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}; - use reth_primitives_traits::{SealedBlock, StorageEntry}; + use reth_ethereum_primitives::EthPrimitives; + use reth_primitives_traits::{SealedBlock, SealedHeader, StorageEntry}; use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_stages_api::StageUnitCheckpoint; use reth_static_file_types::StaticFileSegment; @@ -619,7 +663,7 @@ } }   impl StageTestRunner for MerkleTestRunner { - type S = MerkleStage; + type S = MerkleStage<EthPrimitives>;   fn db(&self) -> &TestStageDB { &self.db
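`MerkleStage` is now generic over `NodePrimitives` and carries a consensus handle, so state-root checks go through `HeaderValidator::validate_state_root` instead of a hard-coded comparison. A construction sketch, assuming `reth_stages::stages::MerkleStage` as the public path and using `NoopConsensus` plus illustrative thresholds in place of a real node's configuration:

```rust
use reth_consensus::noop::NoopConsensus;
use reth_ethereum_primitives::EthPrimitives;
use reth_stages::stages::MerkleStage;

/// Builds the execution and unwind variants of the consensus-aware merkle stage.
fn merkle_stages() -> (MerkleStage<EthPrimitives>, MerkleStage<EthPrimitives>) {
    // A real pipeline would pass the node's consensus; NoopConsensus coerces to
    // `Arc<dyn Consensus<_, Error = ConsensusError>>` just as in the stage itself.
    let consensus = NoopConsensus::arc();

    let execution = MerkleStage::new_execution(
        100_000, // rebuild_threshold: do a clean rebuild when further behind than this
        7_000,   // incremental_threshold: blocks processed per incremental root run
        consensus.clone(),
    );
    let unwind = MerkleStage::new_unwind(consensus);
    (execution, unwind)
}
```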
diff --git reth/crates/stages/stages/src/stages/mod.rs scroll-reth/crates/stages/stages/src/stages/mod.rs index f9b2312f5abd8e3524402b11ff53bce3e5cdd837..7e57009e8081e124c0843b1a319492a233762053 100644 --- reth/crates/stages/stages/src/stages/mod.rs +++ scroll-reth/crates/stages/stages/src/stages/mod.rs @@ -72,9 +72,7 @@ ProviderFactory, ProviderResult, ReceiptProvider, StageCheckpointWriter, StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ - ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, - }; + use reth_stages_api::{ExecInput, ExecutionStageThresholds, Stage, StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_receipt, BlockRangeParams, @@ -301,7 +299,7 @@ db: &TestStageDB, prune_count: usize, segment: StaticFileSegment, is_full_node: bool, - expected: Option<PipelineTarget>, + expected: Option<u64>, ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. @@ -323,11 +321,18 @@ }   // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. + let mut provider = db.factory.database_provider_ro().unwrap(); + if is_full_node { + provider.set_prune_modes(PruneModes { + receipts: Some(PruneMode::Full), + ..Default::default() + }); + } let mut static_file_provider = db.factory.static_file_provider(); static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert!(matches!( static_file_provider - .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), + .check_consistency(&provider), Ok(e) if e == expected )); } @@ -338,7 +343,7 @@ fn save_checkpoint_and_check( db: &TestStageDB, stage_id: StageId, checkpoint_block_number: BlockNumber, - expected: Option<PipelineTarget>, + expected: Option<u64>, ) { let provider_rw = db.factory.provider_rw().unwrap(); provider_rw @@ -349,18 +354,15 @@ assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false,), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); }   /// Inserts a dummy value at key and compare the check consistency result against the expected /// one. - fn update_db_and_check<T: Table<Key = u64>>( - db: &TestStageDB, - key: u64, - expected: Option<PipelineTarget>, - ) where + fn update_db_and_check<T: Table<Key = u64>>(db: &TestStageDB, key: u64, expected: Option<u64>) + where <T as Table>::Value: Default, { update_db_with_and_check::<T>(db, key, expected, &Default::default()); @@ -371,7 +373,7 @@ /// one. 
fn update_db_with_and_check<T: Table<Key = u64>>( db: &TestStageDB, key: u64, - expected: Option<PipelineTarget>, + expected: Option<u64>, value: &T::Value, ) { let provider_rw = db.factory.provider_rw().unwrap(); @@ -382,7 +384,7 @@ assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } @@ -393,7 +395,7 @@ let db = seed_data(90).unwrap(); let db_provider = db.factory.database_provider_ro().unwrap();   assert!(matches!( - db.factory.static_file_provider().check_consistency(&db_provider, false), + db.factory.static_file_provider().check_consistency(&db_provider), Ok(None) )); } @@ -415,7 +417,7 @@ &db, 1, StaticFileSegment::Receipts, archive_node, - Some(PipelineTarget::Unwind(88)), + Some(88), );   simulate_behind_checkpoint_corruption( @@ -423,7 +425,7 @@ &db, 3, StaticFileSegment::Headers, archive_node, - Some(PipelineTarget::Unwind(86)), + Some(86), ); }   @@ -472,7 +474,7 @@ Some(block) );   // When a checkpoint is ahead, we request a pipeline unwind. - save_checkpoint_and_check(&db, StageId::Headers, 91, Some(PipelineTarget::Unwind(block))); + save_checkpoint_and_check(&db, StageId::Headers, 91, Some(block)); }   #[test] @@ -485,7 +487,7 @@ .get_highest_static_file_block(StaticFileSegment::Headers) .unwrap();   // Creates a gap of one header: static_file <missing> db - update_db_and_check::<tables::Headers>(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::<tables::Headers>(&db, current + 2, Some(89));   // Fill the gap, and ensure no unwind is necessary. update_db_and_check::<tables::Headers>(&db, current + 1, None); @@ -504,7 +506,7 @@ // Creates a gap of one transaction: static_file <missing> db update_db_with_and_check::<tables::Transactions>( &db, current + 2, - Some(PipelineTarget::Unwind(89)), + Some(89), &TxLegacy::default().into_signed(Signature::test_signature()).into(), );   @@ -527,7 +529,7 @@ .get_highest_static_file_tx(StaticFileSegment::Receipts) .unwrap();   // Creates a gap of one receipt: static_file <missing> db - update_db_and_check::<tables::Receipts>(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::<tables::Receipts>(&db, current + 2, Some(89));   // Fill the gap, and ensure no unwind is necessary. update_db_and_check::<tables::Receipts>(&db, current + 1, None);
diff --git reth/crates/storage/codecs/derive/src/compact/mod.rs scroll-reth/crates/storage/codecs/derive/src/compact/mod.rs index 00f622be43eed472032835d9b30b031f008765c3..78be372c61c2d7e27f77ef326af4a49344e3a55f 100644 --- reth/crates/storage/codecs/derive/src/compact/mod.rs +++ scroll-reth/crates/storage/codecs/derive/src/compact/mod.rs @@ -188,7 +188,7 @@ /// length. pub fn get_bit_size(ftype: &str) -> u8 { match ftype { "TransactionKind" | "TxKind" | "bool" | "Option" | "Signature" => 1, - "TxType" | "OpTxType" => 2, + "TxType" | "OpTxType" | "ScrollTxType" => 2, "u64" | "BlockNumber" | "TxNumber" | "ChainId" | "NumTransactions" => 4, "u128" => 5, "U256" => 6,
diff --git reth/crates/storage/codecs/src/lib.rs scroll-reth/crates/storage/codecs/src/lib.rs index 67e5f32b07c7f024ce8e5c6b085e88cba776284e..1ac37966c2e1eb51d1ee9ef785c8701ee339546d 100644 --- reth/crates/storage/codecs/src/lib.rs +++ scroll-reth/crates/storage/codecs/src/lib.rs @@ -312,10 +312,9 @@ if len == 0 { return (None, buf) }   - let (len, mut buf) = decode_varuint(buf); + let (len, buf) = decode_varuint(buf);   - let (element, _) = T::from_compact(&buf[..len], len); - buf.advance(len); + let (element, buf) = T::from_compact(buf, len);   (Some(element), buf) }
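The `Option<T>` decoding above previously re-sliced the buffer by the decoded varuint length and advanced it manually; it now hands the remaining buffer to `T::from_compact` and uses the slice that call returns. A round-trip sketch, assuming the generic `Compact` impl for `Option<T>` keeps the varuint-prefixed layout shown in the hunk:

```rust
use reth_codecs::Compact;

/// Round-trips an optional value through the compact codec.
fn roundtrip_optional_u64() {
    let value: Option<u64> = Some(42);

    // `to_compact` writes a varuint length prefix followed by the element's
    // compact bytes and reports a non-zero "length" for `Some`.
    let mut buf = Vec::new();
    let len = value.to_compact(&mut buf);

    // `from_compact` decodes the prefix, lets `u64::from_compact` consume its
    // bytes, and returns whatever remains of the buffer.
    let (decoded, rest) = Option::<u64>::from_compact(&buf, len);
    assert_eq!(decoded, value);
    assert!(rest.is_empty());
}
```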
diff --git reth/crates/storage/db-api/Cargo.toml scroll-reth/crates/storage/db-api/Cargo.toml index 3f7e5c7b1a7ea6ceffb789723529b8b11ad374c9..f8536bbb1ce707f8109bce12683bcbfe726b9430 100644 --- reth/crates/storage/db-api/Cargo.toml +++ scroll-reth/crates/storage/db-api/Cargo.toml @@ -27,8 +27,11 @@ alloy-primitives.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true   +# scroll +reth-scroll-primitives = { workspace = true, optional = true } + # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, optional = true, features = ["serde", "reth-codec"] }   # codecs modular-bitfield.workspace = true @@ -84,6 +87,7 @@ "reth-stages-types/arbitrary", "alloy-consensus/arbitrary", "reth-optimism-primitives?/arbitrary", "reth-ethereum-primitives/arbitrary", + "reth-scroll-primitives?/arbitrary", ] op = [ "dep:reth-optimism-primitives", @@ -91,3 +95,4 @@ "reth-codecs/op", "reth-primitives-traits/op", ] bench = [] +scroll-alloy-traits = ["dep:reth-scroll-primitives", "reth-primitives-traits/scroll-alloy-traits"]
diff --git reth/crates/storage/db-api/src/models/mod.rs scroll-reth/crates/storage/db-api/src/models/mod.rs index 24951789f5de728556cd647b02ee03089baadb87..1100a80daa1b6eca80b4d36da6d333e89daf81c2 100644 --- reth/crates/storage/db-api/src/models/mod.rs +++ scroll-reth/crates/storage/db-api/src/models/mod.rs @@ -245,6 +245,14 @@ impl_compression_for_compact!(OpTransactionSigned, OpReceipt); }   +#[cfg(feature = "scroll-alloy-traits")] +mod scroll { + use super::*; + use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; + + impl_compression_for_compact!(ScrollTransactionSigned, ScrollReceipt); +} + macro_rules! impl_compression_fixed_compact { ($($name:tt),+) => { $(
diff --git reth/crates/storage/db-api/src/tables/mod.rs scroll-reth/crates/storage/db-api/src/tables/mod.rs index a5cb5ff477d61f2d8fc6ed4d78631550b9368f50..259b2d39b15da94e1228a535cb61f63e697573fa 100644 --- reth/crates/storage/db-api/src/tables/mod.rs +++ scroll-reth/crates/storage/db-api/src/tables/mod.rs @@ -531,7 +531,7 @@ pub enum ChainStateKey { /// Last finalized block key LastFinalizedBlock, /// Last safe block key - LastSafeBlockBlock, + LastSafeBlock, }   impl Encode for ChainStateKey { @@ -540,7 +540,7 @@ fn encode(self) -> Self::Encoded { match self { Self::LastFinalizedBlock => [0], - Self::LastSafeBlockBlock => [1], + Self::LastSafeBlock => [1], } } } @@ -549,7 +549,7 @@ impl Decode for ChainStateKey { fn decode(value: &[u8]) -> Result<Self, crate::DatabaseError> { match value { [0] => Ok(Self::LastFinalizedBlock), - [1] => Ok(Self::LastSafeBlockBlock), + [1] => Ok(Self::LastSafeBlock), _ => Err(crate::DatabaseError::Decode), } }
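The `LastSafeBlockBlock` variant is renamed to `LastSafeBlock`; only the Rust identifier changes, while the one-byte table encoding stays `[1]`, so existing databases remain readable. A quick round-trip sketch under that assumption:

```rust
use reth_db_api::{
    table::{Decode, Encode},
    tables::ChainStateKey,
};

/// Confirms the renamed key still encodes/decodes to the same single byte.
fn chain_state_key_roundtrip() {
    assert_eq!(ChainStateKey::LastFinalizedBlock.encode(), [0]);
    assert_eq!(ChainStateKey::LastSafeBlock.encode(), [1]);
    assert_eq!(ChainStateKey::decode(&[1]).unwrap(), ChainStateKey::LastSafeBlock);
}
```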
diff --git reth/crates/storage/db-api/src/tables/raw.rs scroll-reth/crates/storage/db-api/src/tables/raw.rs index 96208a25d56ba3e68d5e122ab72d544af91246ba..bbe33101e2392e13d351ed574e2b8bea3c7dd630 100644 --- reth/crates/storage/db-api/src/tables/raw.rs +++ scroll-reth/crates/storage/db-api/src/tables/raw.rs @@ -136,6 +136,7 @@ V::decompress(&self.value) }   /// Returns the raw value as seen on the database. + #[allow(clippy::missing_const_for_fn)] pub fn raw_value(&self) -> &[u8] { &self.value }
diff --git reth/crates/storage/db/Cargo.toml scroll-reth/crates/storage/db/Cargo.toml index 2dd2517acfd8d6edbfee68e279ae5a4aafa35a98..c6eead402dd6a1d61dbd146ab4c62b7ec3d9c683 100644 --- reth/crates/storage/db/Cargo.toml +++ scroll-reth/crates/storage/db/Cargo.toml @@ -96,6 +96,7 @@ op = [ "reth-db-api/op", "reth-primitives-traits/op", ] +scroll-alloy-traits = ["reth-db-api/scroll-alloy-traits", "reth-primitives-traits/scroll-alloy-traits"] disable-lock = []   [[bench]]
diff --git reth/crates/storage/db/src/lib.rs scroll-reth/crates/storage/db/src/lib.rs index a630672384779a21465e33e77de336ae7b72e52f..b81817c1bf59d6aafd359be9b2db58ce8ab4791b 100644 --- reth/crates/storage/db/src/lib.rs +++ scroll-reth/crates/storage/db/src/lib.rs @@ -108,6 +108,7 @@ self.db.as_ref().unwrap() }   /// Returns the path to the database. + #[allow(clippy::missing_const_for_fn)] pub fn path(&self) -> &Path { &self.path }
diff --git reth/crates/storage/libmdbx-rs/src/environment.rs scroll-reth/crates/storage/libmdbx-rs/src/environment.rs index 648526a7fc503e6396a3792756648382d99bbd89..f4a639cb341e95b8e3d1c6f04889e34ffbdcbbc7 100644 --- reth/crates/storage/libmdbx-rs/src/environment.rs +++ scroll-reth/crates/storage/libmdbx-rs/src/environment.rs @@ -60,12 +60,14 @@ }   /// Returns true if the environment was opened as WRITEMAP. #[inline] + #[allow(clippy::missing_const_for_fn)] pub fn is_write_map(&self) -> bool { self.inner.env_kind.is_write_map() }   /// Returns the kind of the environment. #[inline] + #[allow(clippy::missing_const_for_fn)] pub fn env_kind(&self) -> EnvironmentKind { self.inner.env_kind } @@ -84,6 +86,7 @@ }   /// Returns the transaction manager. #[inline] + #[allow(clippy::missing_const_for_fn)] pub(crate) fn txn_manager(&self) -> &TxnManager { &self.inner.txn_manager } @@ -131,6 +134,7 @@ /// /// The caller **must** ensure that the pointer is never dereferenced after the environment has /// been dropped. #[inline] + #[allow(clippy::missing_const_for_fn)] pub(crate) fn env_ptr(&self) -> *mut ffi::MDBX_env { self.inner.env }
diff --git reth/crates/storage/libmdbx-rs/src/transaction.rs scroll-reth/crates/storage/libmdbx-rs/src/transaction.rs index e47e71ac261754cd289d2a847a13159465d10006..ab53448cbc79d395b45bff7b7d05d28f63f01c4e 100644 --- reth/crates/storage/libmdbx-rs/src/transaction.rs +++ scroll-reth/crates/storage/libmdbx-rs/src/transaction.rs @@ -128,11 +128,13 @@ /// Returns a copy of the raw pointer to the underlying MDBX transaction. #[doc(hidden)] #[cfg(test)] + #[allow(clippy::missing_const_for_fn)] pub fn txn(&self) -> *mut ffi::MDBX_txn { self.inner.txn.txn }   /// Returns a raw pointer to the MDBX environment. + #[allow(clippy::missing_const_for_fn)] pub fn env(&self) -> &Environment { &self.inner.env }
diff --git reth/crates/storage/nippy-jar/src/lib.rs scroll-reth/crates/storage/nippy-jar/src/lib.rs index 4f6b4df000613705f024abb41a0841e608ac7e8f..1b47d595c4f03d11e6fc0169a49cd52cd0193700 100644 --- reth/crates/storage/nippy-jar/src/lib.rs +++ scroll-reth/crates/storage/nippy-jar/src/lib.rs @@ -413,6 +413,7 @@ &self.data_mmap[range] }   /// Returns total size of data + #[allow(clippy::missing_const_for_fn)] pub fn size(&self) -> usize { self.data_mmap.len() }
diff --git reth/crates/storage/nippy-jar/src/writer.rs scroll-reth/crates/storage/nippy-jar/src/writer.rs index cf899791eedf92299ca549c980a44a023e5d83de..bece0bf87484808c346ce5a29f591a7cfb247904 100644 --- reth/crates/storage/nippy-jar/src/writer.rs +++ scroll-reth/crates/storage/nippy-jar/src/writer.rs @@ -430,6 +430,7 @@ }   /// Returns a reference to the offsets vector. #[cfg(test)] + #[allow(clippy::missing_const_for_fn)] pub fn offsets(&self) -> &[u64] { &self.offsets }
diff --git reth/crates/storage/provider/Cargo.toml scroll-reth/crates/storage/provider/Cargo.toml index 82a3726c43ea313805f6b8ed33cf6ddb0eecb601..e8599a8970673b1c41ff22a49a2c44bd7f07da91 100644 --- reth/crates/storage/provider/Cargo.toml +++ scroll-reth/crates/storage/provider/Cargo.toml @@ -29,7 +29,6 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true -reth-evm.workspace = true reth-chain-state.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true @@ -90,7 +89,6 @@ "reth-chain-state/test-utils", "reth-ethereum-engine-primitives", "reth-ethereum-primitives/test-utils", "reth-chainspec/test-utils", - "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils",
diff --git reth/crates/storage/provider/src/providers/blockchain_provider.rs scroll-reth/crates/storage/provider/src/providers/blockchain_provider.rs index 890b98124a526f7cb141ae0ad60c42b478df15c1..7040032eca060e07bb40404dd6c5130472ebded4 100644 --- reth/crates/storage/provider/src/providers/blockchain_provider.rs +++ scroll-reth/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,48 +1,34 @@ -#![allow(unused)] use crate::{ providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, - ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, FullProvider, HashedPostStateProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, -}; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::{Withdrawal, Withdrawals}, - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, + StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::transaction::TransactionMeta; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db_api::{ - models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}, - transaction::DbTx, - Database, -}; -use reth_ethereum_primitives::{Block, EthPrimitives, Receipt, TransactionSigned}; -use reth_evm::{ConfigureEvm, EvmEnv}; +use reth_chainspec::ChainInfo; +use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, StorageChangeSetReader, -}; +use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{HashedPostState, KeccakKeyHasher}; use revm_database::BundleState; use std::{ - ops::{Add, RangeBounds, RangeInclusive, Sub}, + ops::{RangeBounds, RangeInclusive}, sync::Arc, time::Instant, }; @@ -716,6 +702,14 @@ block_number: BlockNumber, ) 
-> ProviderResult<Vec<AccountBeforeTx>> { self.consistent_provider()?.account_block_changeset(block_number) } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>> { + self.consistent_provider()?.get_account_before_block(block_number, address) + } }   impl<N: ProviderNodeTypes> AccountReader for BlockchainProvider<N> { @@ -753,8 +747,7 @@ test_utils::{ create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, - StaticFileWriter, + BlockWriter, CanonChainTracker, ProviderFactory, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -765,22 +758,12 @@ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, }; - use reth_chainspec::{ - ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, - }; - use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, - tables, - transaction::DbTx, - }; + use reth_chainspec::{ChainSpec, MAINNET}; + use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; - use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; + use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives_traits::{ - BlockBody, RecoveredBlock, SealedBlock, SignedTransaction, SignerRecoverable, - }; - use reth_static_file_types::StaticFileSegment; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DBProvider, DatabaseProviderFactory, @@ -793,9 +776,8 @@ random_receipt, BlockParams, BlockRangeParams, }; use revm_database::{BundleState, OriginalValuesKnown}; use std::{ - ops::{Bound, Deref, Range, RangeBounds}, + ops::{Bound, Range, RangeBounds}, sync::Arc, - time::Instant, };   const TEST_BLOCKS_COUNT: usize = 5; @@ -2272,7 +2254,7 @@ };   // Invalid/Non-existent argument should return `None` { - call_method!($arg_count, provider, $method, |_,_,_,_| ( ($invalid_args, None)), tx_num, tx_hash, &in_memory_blocks[0], &receipts); + call_method!($arg_count, provider, $method, |_,_,_,_| ($invalid_args, None), tx_num, tx_hash, &in_memory_blocks[0], &receipts); }   // Check that the item is only in memory and not in database @@ -2283,7 +2265,7 @@ let (args, expected_item) = $item_extractor(last_mem_block, tx_num(last_mem_block), tx_hash(last_mem_block), &receipts); call_method!($arg_count, provider, $method, |_,_,_,_| (args.clone(), expected_item), tx_num, tx_hash, last_mem_block, &receipts);   // Ensure the item is not in storage - call_method!($arg_count, provider.database, $method, |_,_,_,_| ( (args, None)), tx_num, tx_hash, last_mem_block, &receipts); + call_method!($arg_count, provider.database, $method, |_,_,_,_| (args, None), tx_num, tx_hash, last_mem_block, &receipts); } )* }}; @@ -2586,14 +2568,15 @@ // storage. 
persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[1].number); let to_be_persisted_tx = in_memory_blocks[1].body().transactions[0].clone();   - assert!(matches!( + assert_eq!( correct_transaction_hash_fn( *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database - ), - Ok(Some(to_be_persisted_tx)) - )); + ) + .unwrap(), + Some(to_be_persisted_tx) + ); }   Ok(())
diff --git reth/crates/storage/provider/src/providers/consistent.rs scroll-reth/crates/storage/provider/src/providers/consistent.rs index 03615d5357bbe6564c4c10097e57915b432551e1..93415e8e347b15e73dbf2afa13fc9f70e722815f 100644 --- reth/crates/storage/provider/src/providers/consistent.rs +++ scroll-reth/crates/storage/provider/src/providers/consistent.rs @@ -1422,6 +1422,52 @@ self.storage_provider.account_block_changeset(block_number) } } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + // Search in-memory state for the account changeset + let changeset = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .find(|(addr, _)| addr == &address) + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }); + Ok(changeset) + } else { + // Perform checks on whether or not changesets exist for the block. + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + // Delegate to the storage provider for database lookups + self.storage_provider.get_account_before_block(block_number, address) + } + } }   impl<N: ProviderNodeTypes> AccountReader for ConsistentProvider<N> {
diff --git reth/crates/storage/provider/src/providers/database/mod.rs scroll-reth/crates/storage/provider/src/providers/database/mod.rs index 54642a947572e1f566c357829969c341bd9de444..f7b3c4ba603c04c4d04b796744ce5156a40ccc37 100644 --- reth/crates/storage/provider/src/providers/database/mod.rs +++ scroll-reth/crates/storage/provider/src/providers/database/mod.rs @@ -111,6 +111,11 @@ /// Consumes Self and returns DB pub fn into_db(self) -> N::DB { self.db } + + /// Returns reference to the prune modes. + pub const fn prune_modes_ref(&self) -> &PruneModes { + &self.prune_modes + } }   impl<N: NodeTypesWithDB<DB = Arc<DatabaseEnv>>> ProviderFactory<N> {
diff --git reth/crates/storage/provider/src/providers/database/provider.rs scroll-reth/crates/storage/provider/src/providers/database/provider.rs index 16b463be1e80a2388b81b141a89beccca981e6fa..23278592c31d108d3d44390cc154ef47804059c4 100644 --- reth/crates/storage/provider/src/providers/database/provider.rs +++ scroll-reth/crates/storage/provider/src/providers/database/provider.rs @@ -47,8 +47,7 @@ }; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ - Account, Block as _, BlockBody as _, Bytecode, GotExpected, RecoveredBlock, SealedHeader, - StorageEntry, + Account, Block as _, BlockBody as _, Bytecode, RecoveredBlock, SealedHeader, StorageEntry, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, @@ -59,7 +58,7 @@ use reth_storage_api::{ BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; -use reth_storage_errors::provider::{ProviderResult, RootMismatch}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, updates::{StorageTrieUpdates, TrieUpdates}, @@ -338,7 +337,7 @@ }   // Unwind account history indices. self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); + let storage_range = BlockNumberAddress::range(range);   let changed_storages = self .tx @@ -370,29 +369,11 @@ account_prefix_set: account_prefix_set.freeze(), storage_prefix_sets, destroyed_accounts, }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) + let (_, trie_updates) = StateRoot::from_tx(&self.tx) .with_prefix_sets(prefix_sets) .root_with_updates() .map_err(reth_db_api::DatabaseError::from)?;   - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root(); - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } self.write_trie_updates(&trie_updates)?;   Ok(()) @@ -557,6 +538,7 @@ &self.tx }   /// Returns a reference to the chain specification. + #[allow(clippy::missing_const_for_fn)] pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } @@ -939,6 +921,19 @@ Ok(account_before) }) .collect() } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>> { + self.tx + .cursor_dup_read::<tables::AccountChangeSets>()? + .seek_by_key_subkey(block_number, address)? 
+ .filter(|acc| acc.address == address) + .map(Ok) + .transpose() + } }   impl<TX: DbTx + 'static, N: NodeTypesForProvider> HeaderSyncGapProvider @@ -1968,7 +1963,6 @@ // revert storages for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2067,7 +2061,6 @@ // revert storages for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2855,7 +2848,7 @@ fn last_safe_block_number(&self) -> ProviderResult<Option<BlockNumber>> { let mut finalized_blocks = self .tx .cursor_read::<tables::ChainState>()? - .walk(Some(tables::ChainStateKey::LastSafeBlockBlock))? + .walk(Some(tables::ChainStateKey::LastSafeBlock))? .take(1) .collect::<Result<BTreeMap<tables::ChainStateKey, BlockNumber>, _>>()?;   @@ -2872,9 +2865,7 @@ .put::<tables::ChainState>(tables::ChainStateKey::LastFinalizedBlock, block_number)?) }   fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { - Ok(self - .tx - .put::<tables::ChainState>(tables::ChainStateKey::LastSafeBlockBlock, block_number)?) + Ok(self.tx.put::<tables::ChainState>(tables::ChainStateKey::LastSafeBlock, block_number)?) } }
diff --git reth/crates/storage/provider/src/providers/mod.rs scroll-reth/crates/storage/provider/src/providers/mod.rs index ab54fe01e567cf1d53e51f706db3beaf14938869..56d27ea33612bb4aa1c18a1c3646612274bf353e 100644 --- reth/crates/storage/provider/src/providers/mod.rs +++ scroll-reth/crates/storage/provider/src/providers/mod.rs @@ -1,6 +1,6 @@ //! Contains the main provider types and traits for interacting with the blockchain's storage.   -use reth_chainspec::EthereumHardforks; +use reth_chainspec::EthereumCapabilities; use reth_db_api::table::Value; use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB};   @@ -34,7 +34,7 @@ /// [`ProviderNodeTypes`]. pub trait NodeTypesForProvider where Self: NodeTypes< - ChainSpec: EthereumHardforks, + ChainSpec: EthereumCapabilities, Storage: ChainStorage<Self::Primitives>, Primitives: FullNodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>, >, @@ -43,7 +43,7 @@ }   impl<T> NodeTypesForProvider for T where T: NodeTypes< - ChainSpec: EthereumHardforks, + ChainSpec: EthereumCapabilities, Storage: ChainStorage<T::Primitives>, Primitives: FullNodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>, >
diff --git reth/crates/storage/provider/src/providers/state/historical.rs scroll-reth/crates/storage/provider/src/providers/state/historical.rs index 9a22a527ccb9fbdc00fc622c35c64c4fddbdedf1..f3e69bf7d910cfab399e6ba6de13025de39d66c8 100644 --- reth/crates/storage/provider/src/providers/state/historical.rs +++ scroll-reth/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -241,23 +241,23 @@ self.provider.tx_ref() } }   -impl<Provider: DBProvider + BlockNumReader> AccountReader +impl<Provider: DBProvider + BlockNumReader + ChangeSetReader> AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> { match self.account_history_lookup(*address)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx() - .cursor_dup_read::<tables::AccountChangeSets>()? - .seek_by_key_subkey(changeset_block_number, *address)? - .filter(|acc| &acc.address == address) - .ok_or(ProviderError::AccountChangesetNotFound { - block_number: changeset_block_number, - address: *address, - })? - .info), + HistoryInfo::InChangeset(changeset_block_number) => { + // Use ChangeSetReader trait method to get the account from changesets + self.provider + .get_account_before_block(changeset_block_number, *address)? + .ok_or(ProviderError::AccountChangesetNotFound { + block_number: changeset_block_number, + address: *address, + }) + .map(|account_before| account_before.info) + } HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { Ok(self.tx().get_by_encoded_key::<tables::PlainAccountState>(address)?) } @@ -394,7 +394,7 @@ HashedPostState::from_bundle_state::<KeccakKeyHasher>(bundle_state.state()) } }   -impl<Provider: DBProvider + BlockNumReader + BlockHashReader> StateProvider +impl<Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader> StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. @@ -485,7 +485,7 @@ } }   // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider<Provider> where [Provider: DBProvider + BlockNumReader + BlockHashReader ]); +delegate_provider_impls!(HistoricalStateProvider<Provider> where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]);   /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
@@ -530,7 +530,9 @@ transaction::{DbTx, DbTxMut}, BlockNumberList, }; use reth_primitives_traits::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, + }; use reth_storage_errors::provider::ProviderError;   const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001"); @@ -540,7 +542,9 @@ b256!("0x0000000000000000000000000000000000000000000000000000000000000001");   const fn assert_state_provider<T: StateProvider>() {} #[expect(dead_code)] - const fn assert_historical_state_provider<T: DBProvider + BlockNumReader + BlockHashReader>() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + >() { assert_state_provider::<HistoricalStateProvider<T>>(); }
diff --git reth/crates/storage/provider/src/providers/static_file/manager.rs scroll-reth/crates/storage/provider/src/providers/static_file/manager.rs index 434d3836fb23fbad0059e5b57026db80e15de06c..9f91cb9d5f8414fee3d008467b05d48b250adbe2 100644 --- reth/crates/storage/provider/src/providers/static_file/manager.rs +++ scroll-reth/crates/storage/provider/src/providers/static_file/manager.rs @@ -37,7 +37,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; -use reth_stages_types::{PipelineTarget, StageId}; +use reth_stages_types::StageId; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, @@ -731,15 +731,14 @@ /// ([`TxNumber`] or [`BlockNumber`]). /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// - /// Returns a [`Option`] of [`PipelineTarget::Unwind`] if any healing is further required. + /// Returns a [`Option`] with block number to unwind to if any healing is further required. /// /// WARNING: No static file writer should be held before calling this function, otherwise it /// will deadlock. pub fn check_consistency<Provider>( &self, provider: &Provider, - has_receipt_pruning: bool, - ) -> ProviderResult<Option<PipelineTarget>> + ) -> ProviderResult<Option<BlockNumber>> where Provider: DBProvider + BlockReader + StageCheckpointReader + ChainSpecProvider, N: NodePrimitives<Receipt: Value, BlockHeader: Value, SignedTx: Value>, @@ -776,7 +775,7 @@ } };   for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { + if provider.prune_modes_ref().has_receipts_pruning() && segment.is_receipts() { // Pruned nodes (including full node) do not store receipts as static files. continue } @@ -887,7 +886,7 @@ update_unwind_target(unwind); } }   - Ok(unwind_target.map(PipelineTarget::Unwind)) + Ok(unwind_target) }   /// Checks consistency of the latest static file segment and throws an error if at fault. @@ -1213,6 +1212,7 @@ })) }   /// Returns directory where `static_files` are located. + #[allow(clippy::missing_const_for_fn)] pub fn directory(&self) -> &Path { &self.path } @@ -1303,12 +1303,14 @@ }   /// Returns `static_files` directory #[cfg(any(test, feature = "test-utils"))] + #[allow(clippy::missing_const_for_fn)] pub fn path(&self) -> &Path { &self.path }   /// Returns `static_files` transaction index #[cfg(any(test, feature = "test-utils"))] + #[allow(clippy::missing_const_for_fn)] pub fn tx_index(&self) -> &RwLock<SegmentRanges> { &self.static_files_tx_index }
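`check_consistency` now returns the unwind block number directly instead of wrapping it in `PipelineTarget::Unwind`, and it reads receipt pruning from the provider's own prune modes, so the old `has_receipt_pruning` argument is gone. A minimal caller sketch, assuming a `ProviderFactory` for the node's types:

```rust
use alloy_primitives::BlockNumber;
use reth_provider::{
    providers::ProviderNodeTypes, DatabaseProviderFactory, ProviderFactory, ProviderResult,
    StaticFileProviderFactory,
};

/// Returns the block to unwind to, if the static files and database disagree.
fn storage_unwind_target<N: ProviderNodeTypes>(
    factory: &ProviderFactory<N>,
) -> ProviderResult<Option<BlockNumber>> {
    // The provider carries the prune modes, so receipt-pruned (full) nodes are
    // detected without an extra argument.
    let provider = factory.database_provider_ro()?;
    factory.static_file_provider().check_consistency(&provider)
}
```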
diff --git reth/crates/storage/provider/src/test_utils/mock.rs scroll-reth/crates/storage/provider/src/test_utils/mock.rs index d5e3fe4da7b0b5374e0dd351c74e3841a7568933..1024312ead9da3f9038f84c08e8d6a02eada3cc0 100644 --- reth/crates/storage/provider/src/test_utils/mock.rs +++ scroll-reth/crates/storage/provider/src/test_utils/mock.rs @@ -984,6 +984,14 @@ _block_number: BlockNumber, ) -> ProviderResult<Vec<AccountBeforeTx>> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>> { + Ok(None) + } }   impl<T: NodePrimitives, ChainSpec: Send + Sync> StateReader for MockEthProvider<T, ChainSpec> {
diff --git reth/crates/storage/rpc-provider/src/lib.rs scroll-reth/crates/storage/rpc-provider/src/lib.rs index 76e511d52d4f86c8fa9b48041bd3f9fc88c763bb..ed6e49eefbdf0cac1296ed86edb40aa8c9e4bab9 100644 --- reth/crates/storage/rpc-provider/src/lib.rs +++ scroll-reth/crates/storage/rpc-provider/src/lib.rs @@ -1764,6 +1764,14 @@ _block_number: BlockNumber, ) -> Result<Vec<reth_db_api::models::AccountBeforeTx>, ProviderError> { Err(ProviderError::UnsupportedProvider) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult<Option<reth_db_api::models::AccountBeforeTx>> { + Err(ProviderError::UnsupportedProvider) + } }   impl<P, Node, N> StateProviderFactory for RpcBlockchainStateProvider<P, Node, N>
diff --git reth/crates/storage/storage-api/src/account.rs scroll-reth/crates/storage/storage-api/src/account.rs index 1692c4c21f401bfc6e91999e16880b6867ec18eb..270bfd1226c936b5dffe6e09fff6dc629941346d 100644 --- reth/crates/storage/storage-api/src/account.rs +++ scroll-reth/crates/storage/storage-api/src/account.rs @@ -54,4 +54,13 @@ fn account_block_changeset( &self, block_number: BlockNumber, ) -> ProviderResult<Vec<AccountBeforeTx>>; + + /// Search the block's changesets for the given address, and return the result. + /// + /// Returns `None` if the account was not changed in this block. + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>>; }
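`ChangeSetReader` gains `get_account_before_block`, a point lookup of a single account's pre-block state that the consistent, database, mock, noop, and RPC providers in this diff implement. A usage sketch with an illustrative address and block number:

```rust
use alloy_primitives::{address, BlockNumber};
use reth_db_api::models::AccountBeforeTx;
use reth_storage_api::ChangeSetReader;
use reth_storage_errors::provider::ProviderResult;

/// Looks up one account's state as it was before the given block was applied.
fn account_before<P: ChangeSetReader>(provider: &P) -> ProviderResult<()> {
    let block: BlockNumber = 100;
    let addr = address!("0x0000000000000000000000000000000000000001");

    match provider.get_account_before_block(block, addr)? {
        Some(AccountBeforeTx { info: Some(account), .. }) => {
            // The account existed before `block`; `account` is its prior state.
            let _nonce = account.nonce;
        }
        Some(AccountBeforeTx { info: None, .. }) => {
            // The account was created in `block`.
        }
        None => {
            // The account was not touched in `block`.
        }
    }
    Ok(())
}
```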
diff --git reth/crates/storage/storage-api/src/base_fee.rs scroll-reth/crates/storage/storage-api/src/base_fee.rs new file mode 100644 index 0000000000000000000000000000000000000000..35415a613716aad0a104759430ac2621a0c0e4e6 --- /dev/null +++ scroll-reth/crates/storage/storage-api/src/base_fee.rs @@ -0,0 +1,57 @@ +use crate::{StateProvider, StateProviderBox}; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, U256}; +use reth_chainspec::EthChainSpec; +use reth_storage_errors::ProviderError; +use revm_database::{Database, State}; + +/// An instance of the trait can return the base fee for the next block. +pub trait BaseFeeProvider<P: StorageProvider> { + /// Returns the base fee for the next block. + fn next_block_base_fee<H: BlockHeader>( + &self, + provider: &mut P, + parent_header: &H, + ts: u64, + ) -> Result<u64, P::Error>; +} + +impl<T: EthChainSpec, P: StorageProvider> BaseFeeProvider<P> for T { + fn next_block_base_fee<H: BlockHeader>( + &self, + _provider: &mut P, + parent_header: &H, + ts: u64, + ) -> Result<u64, P::Error> { + Ok(parent_header + .next_block_base_fee(self.base_fee_params_at_timestamp(ts)) + .unwrap_or_default()) + } +} + +/// A storage provider trait that can be implemented on foreign types. +pub trait StorageProvider { + /// The error type. + type Error; + + /// Returns the storage value at the address for the provided key. + fn storage(&mut self, address: Address, key: U256) -> Result<U256, Self::Error>; +} + +impl<DB: Database> StorageProvider for State<DB> { + type Error = DB::Error; + + fn storage(&mut self, address: Address, key: U256) -> Result<U256, Self::Error> { + let _ = self.load_cache_account(address)?; + Database::storage(self, address, key) + } +} + +impl StorageProvider for StateProviderBox { + type Error = ProviderError; + + fn storage(&mut self, address: Address, key: U256) -> Result<U256, Self::Error> { + Ok(StateProvider::storage(self, address, key.into())?.unwrap_or_default()) + } +}
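The new `BaseFeeProvider`/`StorageProvider` pair lets base-fee computation take a mutable state handle: the blanket impl for any `EthChainSpec` ignores it and uses the parent header's EIP-1559 parameters, while the handle leaves room for chain-specific implementations to read fee inputs from storage. A hedged usage sketch over the `StateProviderBox` impl shown above:

```rust
use alloy_consensus::BlockHeader;
use reth_storage_api::{BaseFeeProvider, StateProviderBox};
use reth_storage_errors::ProviderError;

/// Computes the next block's base fee for a child block at `child_timestamp`.
fn next_base_fee<Spec, H>(
    spec: &Spec,
    state: &mut StateProviderBox,
    parent: &H,
    child_timestamp: u64,
) -> Result<u64, ProviderError>
where
    Spec: BaseFeeProvider<StateProviderBox>,
    H: BlockHeader,
{
    // For Ethereum-style specs this reduces to the parent header's
    // `next_block_base_fee`; other specs may consult `state`.
    spec.next_block_base_fee(state, parent, child_timestamp)
}
```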
diff --git reth/crates/storage/storage-api/src/chain.rs scroll-reth/crates/storage/storage-api/src/chain.rs index 63e6bdba73873ab12e2685793ee4de723cd6c74e..a30fd8d4a8a70ed721f30cd548939b9a8b46972f 100644 --- reth/crates/storage/storage-api/src/chain.rs +++ scroll-reth/crates/storage/storage-api/src/chain.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use alloy_consensus::Header; use alloy_primitives::BlockNumber; use core::marker::PhantomData; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthereumCapabilities, EthereumHardforks}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::StoredBlockOmmers, @@ -141,7 +141,7 @@ }   impl<Provider, T, H> BlockBodyReader<Provider> for EthStorage<T, H> where - Provider: DBProvider + ChainSpecProvider<ChainSpec: EthereumHardforks>, + Provider: DBProvider + ChainSpecProvider<ChainSpec: EthereumCapabilities>, T: SignedTransaction, H: FullBlockHeader, { @@ -162,7 +162,7 @@ for (header, transactions) in inputs { // If we are past shanghai, then all blocks should have a withdrawal list, // even if empty - let withdrawals = if chain_spec.is_shanghai_active_at_timestamp(header.timestamp()) { + let withdrawals = if chain_spec.withdrawals_active(header.timestamp()) { withdrawals_cursor .seek_exact(header.number())? .map(|(_, w)| w.withdrawals)
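`EthStorage` now gates the withdrawals read on `EthereumCapabilities::withdrawals_active` instead of asking for Shanghai by name, so chain specs that schedule withdrawals differently (or not at all) can answer correctly. A small sketch of the bound, treating `EthereumCapabilities` as the fork-provided trait referenced in this diff:

```rust
use alloy_consensus::BlockHeader;
use reth_chainspec::EthereumCapabilities;

/// Whether a block body at this timestamp is expected to carry a withdrawals list.
fn requires_withdrawals<C, H>(chain_spec: &C, header: &H) -> bool
where
    C: EthereumCapabilities,
    H: BlockHeader,
{
    chain_spec.withdrawals_active(header.timestamp())
}
```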
diff --git reth/crates/storage/storage-api/src/lib.rs scroll-reth/crates/storage/storage-api/src/lib.rs index 897802da98009e1ed47d155382a6be5b8b865fdb..49dcfd56582bd3f3d5915dd813f1282edee9af04 100644 --- reth/crates/storage/storage-api/src/lib.rs +++ scroll-reth/crates/storage/storage-api/src/lib.rs @@ -82,6 +82,9 @@ mod primitives; pub use primitives::*;   +mod base_fee; +pub use base_fee::*; + mod block_indices; pub use block_indices::*;
diff --git reth/crates/storage/storage-api/src/noop.rs scroll-reth/crates/storage/storage-api/src/noop.rs index 44e499ae00610f45706c1ebf60739b6ef56d5284..e0c57d5226b75d09f25d6dd50d4b6a2de3286667 100644 --- reth/crates/storage/storage-api/src/noop.rs +++ scroll-reth/crates/storage/storage-api/src/noop.rs @@ -399,6 +399,14 @@ _block_number: BlockNumber, ) -> ProviderResult<Vec<AccountBeforeTx>> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult<Option<AccountBeforeTx>> { + Ok(None) + } }   impl<C: Send + Sync, N: NodePrimitives> StateRootProvider for NoopProvider<C, N> {
diff --git reth/crates/trie/parallel/Cargo.toml scroll-reth/crates/trie/parallel/Cargo.toml index c9f625a1500bc6140ec992763e220ccb9f09e1cd..b4463d9ede3b6fbf0b79112614be7a36be8d5033 100644 --- reth/crates/trie/parallel/Cargo.toml +++ scroll-reth/crates/trie/parallel/Cargo.toml @@ -36,6 +36,7 @@ derive_more.workspace = true rayon.workspace = true itertools.workspace = true tokio = { workspace = true, features = ["rt-multi-thread"] } +crossbeam-channel.workspace = true   # `metrics` feature reth-metrics = { workspace = true, optional = true }
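`crossbeam-channel` is pulled in for the worker pools introduced in `proof_task.rs` below: unlike `std::sync::mpsc`, its receivers are `Clone`, so several workers can pull jobs from one shared queue, and the queue closes once every sender is dropped. A minimal standalone illustration of that pattern (the job type and worker count are placeholders):

```rust
// Sketch: several workers share one crossbeam receiver; dropping the sender
// closes the queue and lets every worker loop exit, like the handle drop below.
use crossbeam_channel::unbounded;

fn main() {
    let (tx, rx) = unbounded::<u32>();
    let workers: Vec<_> = (0..2)
        .map(|id| {
            let rx = rx.clone(); // each worker shares the same queue
            std::thread::spawn(move || {
                while let Ok(job) = rx.recv() {
                    println!("worker {id} got job {job}");
                }
            })
        })
        .collect();

    for job in 0..4 {
        tx.send(job).unwrap();
    }
    drop(tx); // closing the channel shuts the workers down

    for worker in workers {
        worker.join().unwrap();
    }
}
```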
diff --git reth/crates/trie/parallel/src/proof.rs scroll-reth/crates/trie/parallel/src/proof.rs index d6e1b57ed9ba50d544992a2208359a1512b25270..ffa7aa4dc31cbadb6d764ae2402e6b07adec466c 100644 --- reth/crates/trie/parallel/src/proof.rs +++ scroll-reth/crates/trie/parallel/src/proof.rs @@ -1,39 +1,19 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{ProofTaskKind, ProofTaskManagerHandle, StorageProofInput}, + proof_task::{AccountMultiproofInput, ProofWorkerHandle, StorageProofInput}, root::ParallelStateRootError, - stats::ParallelTrieTracker, StorageRootTargets, }; -use alloy_primitives::{ - map::{B256Map, B256Set, HashMap}, - B256, -}; -use alloy_rlp::{BufMut, Encodable}; +use alloy_primitives::{map::B256Set, B256}; use dashmap::DashMap; -use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderError, -}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSetsMut}, - proof::StorageProof, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, updates::TrieUpdatesSorted, - walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, - MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, -}; -use reth_trie_common::{ - added_removed_keys::MultiAddedRemovedKeys, - proof::{DecodedProofNodes, ProofRetainer}, + DecodedMultiProof, DecodedStorageMultiProof, HashedPostStateSorted, MultiProofTargets, Nibbles, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; use std::sync::{mpsc::Receiver, Arc}; use tracing::trace;   @@ -42,9 +22,7 @@ /// /// This can collect proof for many targets in parallel, spawning a task for each hashed address /// that has proof targets. #[derive(Debug)] -pub struct ParallelProof<Factory: DatabaseProviderFactory> { - /// Consistent view of the database. - view: ConsistentDbView<Factory>, +pub struct ParallelProof { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc<TrieUpdatesSorted>, @@ -58,8 +36,8 @@ /// Flag indicating whether to include branch node masks in the proof. collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>, - /// Handle to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + /// Handle to the proof worker pools. + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. missed_leaves_storage_roots: Arc<DashMap<B256, B256>>, @@ -67,25 +45,23 @@ #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics, }   -impl<Factory: DatabaseProviderFactory> ParallelProof<Factory> { +impl ParallelProof { /// Create new state proof generator. 
pub fn new( - view: ConsistentDbView<Factory>, nodes_sorted: Arc<TrieUpdatesSorted>, state_sorted: Arc<HashedPostStateSorted>, prefix_sets: Arc<TriePrefixSetsMut>, missed_leaves_storage_roots: Arc<DashMap<B256, B256>>, - storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - view, nodes_sorted, state_sorted, prefix_sets, missed_leaves_storage_roots, collect_branch_node_masks: false, multi_added_removed_keys: None, - storage_proof_task_handle, + proof_worker_handle, #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics::new_with_labels(&[("type", "proof")]), } @@ -106,19 +82,16 @@ ) -> Self { self.multi_added_removed_keys = multi_added_removed_keys; self } -} - -impl<Factory> ParallelProof<Factory> -where - Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, -{ /// Queues a storage proof task and returns a receiver for the result. fn queue_storage_proof( &self, hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Receiver<Result<DecodedStorageMultiProof, ParallelStateRootError>> { + ) -> Result< + Receiver<Result<DecodedStorageMultiProof, ParallelStateRootError>>, + ParallelStateRootError, + > { let input = StorageProofInput::new( hashed_address, prefix_set, @@ -127,10 +100,9 @@ self.collect_branch_node_masks, self.multi_added_removed_keys.clone(), );   - let (sender, receiver) = std::sync::mpsc::channel(); - let _ = - self.storage_proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); - receiver + self.proof_worker_handle + .queue_storage_proof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string())) }   /// Generate a storage multiproof according to the specified targets and hashed address. @@ -150,7 +122,7 @@ ?hashed_address, "Starting storage proof generation" );   - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); + let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots)?; let proof_result = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), @@ -167,16 +139,16 @@ proof_result }   - /// Generate a state multiproof according to specified targets. - pub fn decoded_multiproof( - self, - targets: MultiProofTargets, - ) -> Result<DecodedMultiProof, ParallelStateRootError> { - let mut tracker = ParallelTrieTracker::default(); - - // Extend prefix sets with targets - let mut prefix_sets = (*self.prefix_sets).clone(); - prefix_sets.extend(TriePrefixSetsMut { + /// Extends prefix sets with the given multiproof targets and returns the frozen result. + /// + /// This is a helper function used to prepare prefix sets before computing multiproofs. + /// Returns frozen (immutable) prefix sets ready for use in proof computation. + pub fn extend_prefix_sets_with_targets( + base_prefix_sets: &TriePrefixSetsMut, + targets: &MultiProofTargets, + ) -> TriePrefixSets { + let mut extended = base_prefix_sets.clone(); + extended.extend(TriePrefixSetsMut { account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), storage_prefix_sets: targets .iter() @@ -187,13 +159,21 @@ }) .collect(), destroyed_accounts: Default::default(), }); - let prefix_sets = prefix_sets.freeze(); + extended.freeze() + } + + /// Generate a state multiproof according to specified targets. 
+ pub fn decoded_multiproof( + self, + targets: MultiProofTargets, + ) -> Result<DecodedMultiProof, ParallelStateRootError> { + // Extend prefix sets with targets + let prefix_sets = Self::extend_prefix_sets_with_targets(&self.prefix_sets, &targets);   - let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets.clone(), + let storage_root_targets_len = StorageRootTargets::count( + &prefix_sets.account_prefix_set, + &prefix_sets.storage_prefix_sets, ); - let storage_root_targets_len = storage_root_targets.len();   trace!( target: "trie::parallel_proof", @@ -201,150 +181,31 @@ total_targets = storage_root_targets_len, "Starting parallel proof generation" );   - // Pre-calculate storage roots for accounts which were changed. - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + // Queue account multiproof request to account worker pool   - // stores the receiver for the storage proof outcome for the hashed addresses - // this way we can lazily await the outcome when we iterate over the map - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(storage_root_targets.len(), Default::default()); - - for (hashed_address, prefix_set) in - storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) - { - let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); + let input = AccountMultiproofInput { + targets, + prefix_sets, + collect_branch_node_masks: self.collect_branch_node_masks, + multi_added_removed_keys: self.multi_added_removed_keys.clone(), + missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), + };   - // store the receiver for that result with the hashed address so we can await this in - // place when we iterate over the trie - storage_proof_receivers.insert(hashed_address, receiver); - } + let receiver = self + .proof_worker_handle + .dispatch_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?;   - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &self.nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &self.state_sorted, - ); + // Wait for account multiproof result from worker + let (multiproof, stats) = receiver.recv().map_err(|_| { + ParallelStateRootError::Other( + "Account multiproof channel dropped: worker died or pool shutdown".to_string(), + ) + })??;   - let accounts_added_removed_keys = - self.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. 
- let retainer = targets - .keys() - .map(Nibbles::unpack) - .collect::<ProofRetainer>() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_masks); - - // Initialize all storage multiproofs as empty. - // Storage multiproofs for non empty tries will be overwritten if necessary. - let mut collected_decoded_storages: B256Map<DecodedStorageMultiProof> = - targets.keys().map(|key| (*key, DecodedStorageMultiProof::empty())).collect(); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - while let Some(account_node) = - account_node_iter.try_next().map_err(ProviderError::Database)? - { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(rx) => { - let decoded_storage_multiproof = rx.recv().map_err(|e| { - ParallelStateRootError::StorageRoot(StorageRootError::Database( - DatabaseError::Other(format!( - "channel closed for {hashed_address}: {e}" - )), - )) - })??; - let root = decoded_storage_multiproof.root; - collected_decoded_storages - .insert(hashed_address, decoded_storage_multiproof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match self.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = StorageProof::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - targets.get(&hashed_address).cloned().unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - StorageRootError::Database(DatabaseError::Other( - e.to_string(), - )), - ) - })? 
- .root; - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - let _ = hash_builder.root(); - - let stats = tracker.finish(); #[cfg(feature = "metrics")] self.metrics.record(stats);   - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes - .into_iter() - .map(|(path, node)| (path, node.tree_mask)) - .collect(), - ) - } else { - (HashMap::default(), HashMap::default()) - }; - trace!( target: "trie::parallel_proof", total_targets = storage_root_targets_len, @@ -356,28 +217,26 @@ precomputed_storage_roots = stats.precomputed_storage_roots(), "Calculated decoded proof" );   - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) + Ok(multiproof) } }   #[cfg(test)] mod tests { use super::*; - use crate::proof_task::{ProofTaskCtx, ProofTaskManager}; + use crate::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use alloy_primitives::{ keccak256, - map::{B256Set, DefaultHashBuilder}, + map::{B256Set, DefaultHashBuilder, HashMap}, Address, U256, }; use rand::Rng; use reth_primitives_traits::{Account, StorageEntry}; - use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; use reth_trie::proof::Proof; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use tokio::runtime::Runtime;   #[test] @@ -447,21 +306,15 @@ let rt = Runtime::new().unwrap();   let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1); - let proof_task_handle = proof_task.handle(); - - // keep the join handle around to make sure it does not return any errors - // after we compute the state root - let join_handle = rt.spawn_blocking(move || proof_task.run()); + let proof_worker_handle = + ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1);   let parallel_result = ParallelProof::new( - consistent_view, Default::default(), Default::default(), Default::default(), Default::default(), - proof_task_handle.clone(), + proof_worker_handle.clone(), ) .decoded_multiproof(targets.clone()) .unwrap(); @@ -489,9 +342,7 @@ // then compare the entire thing for any mask differences assert_eq!(parallel_result, sequential_result_decoded);   - // drop the handle to terminate the task and then block on the proof task handle to make - // sure it does not return any errors - drop(proof_task_handle); - rt.block_on(join_handle).unwrap().expect("The proof task should not return an error"); + // Workers shut down automatically when handle is dropped + drop(proof_worker_handle); } }
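With this refactor `ParallelProof` no longer carries a `ConsistentDbView` or database generics; every transaction lives inside the worker pools behind `ProofWorkerHandle`. A sketch of the new construction flow, mirroring the updated test above (module paths, empty trie inputs, and the 4/2 worker split are assumptions):

```rust
// Sketch only, mirroring the updated test: the empty trie inputs and worker
// counts are placeholders, and re-export paths are assumed from the crate layout.
use reth_provider::{providers::ConsistentDbView, BlockReader, DatabaseProviderFactory};
use reth_trie::{DecodedMultiProof, MultiProofTargets};
use reth_trie_parallel::{
    proof::ParallelProof,
    proof_task::{ProofTaskCtx, ProofWorkerHandle},
    root::ParallelStateRootError,
};

fn multiproof_via_workers<Factory>(
    rt: tokio::runtime::Handle,
    view: ConsistentDbView<Factory>,
    targets: MultiProofTargets,
) -> Result<DecodedMultiProof, ParallelStateRootError>
where
    Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static,
{
    let task_ctx =
        ProofTaskCtx::new(Default::default(), Default::default(), Default::default());
    // 4 storage workers, 2 account workers; dropping the handle shuts them down.
    let workers = ProofWorkerHandle::new(rt, view, task_ctx, 4, 2);

    ParallelProof::new(
        Default::default(), // nodes_sorted
        Default::default(), // state_sorted
        Default::default(), // prefix_sets
        Default::default(), // missed_leaves_storage_roots
        workers,
    )
    .decoded_multiproof(targets)
}
```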
diff --git reth/crates/trie/parallel/src/proof_task.rs scroll-reth/crates/trie/parallel/src/proof_task.rs index 9bb96d4b19e04a7e76bf6070a1df82cd8a8fa88a..5c26f6d99c37b6091e67feef1afe2366a2f72788 100644 --- reth/crates/trie/parallel/src/proof_task.rs +++ scroll-reth/crates/trie/parallel/src/proof_task.rs @@ -1,217 +1,624 @@ -//! A Task that manages sending proof requests to a number of tasks that have longer-running -//! database transactions. +//! Parallel proof computation using worker pools with dedicated database transactions. //! -//! The [`ProofTaskManager`] ensures that there are a max number of currently executing proof tasks, -//! and is responsible for managing the fixed number of database transactions created at the start -//! of the task. +//! +//! # Architecture +//! +//! - **Worker Pools**: Pre-spawned workers with dedicated database transactions +//! - Storage pool: Handles storage proofs and blinded storage node requests +//! - Account pool: Handles account multiproofs and blinded account node requests +//! - **Direct Channel Access**: [`ProofWorkerHandle`] provides type-safe queue methods with direct +//! access to worker channels, eliminating routing overhead +//! - **Automatic Shutdown**: Workers terminate gracefully when all handles are dropped //! //! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and //! [`HashedPostStateCursorFactory`], which are each backed by a database transaction.   -use crate::root::ParallelStateRootError; -use alloy_primitives::{map::B256Set, B256}; +use crate::{ + root::ParallelStateRootError, + stats::{ParallelTrieStats, ParallelTrieTracker}, + StorageRootTargets, +}; +use alloy_primitives::{ + map::{B256Map, B256Set}, + B256, +}; +use alloy_rlp::{BufMut, Encodable}; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use dashmap::DashMap; use reth_db_api::transaction::DbTx; -use reth_execution_errors::SparseTrieError; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderResult, + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, }; +use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, - prefix_set::TriePrefixSetsMut, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{TriePrefixSets, TriePrefixSetsMut}, proof::{ProofTrieNodeProviderFactory, StorageProof}, - trie_cursor::InMemoryTrieCursorFactory, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, + walker::TrieWalker, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, + MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSet, PrefixSetMut}, + proof::{DecodedProofNodes, ProofRetainer}, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ - collections::VecDeque, sync::{ - atomic::{AtomicUsize, Ordering}, - mpsc::{channel, Receiver, SendError, Sender}, + mpsc::{channel, Receiver, Sender}, Arc, }, time::Instant, }; use tokio::runtime::Handle; -use 
tracing::{debug, trace}; +use tracing::trace;   #[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskMetrics; +use crate::proof_task_metrics::ProofTaskTrieMetrics;   type StorageProofResult = Result<DecodedStorageMultiProof, ParallelStateRootError>; type TrieNodeProviderResult = Result<Option<RevealedNode>, SparseTrieError>; +type AccountMultiproofResult = + Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>;   -/// A task that manages sending multiproof requests to a number of tasks that have longer-running -/// database transactions +/// Internal message for storage workers. #[derive(Debug)] -pub struct ProofTaskManager<Factory: DatabaseProviderFactory> { - /// Max number of database transactions to create - max_concurrency: usize, - /// Number of database transactions created - total_transactions: usize, - /// Consistent view provider used for creating transactions on-demand +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Channel to send result back to original caller + result_sender: Sender<StorageProofResult>, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender<TrieNodeProviderResult>, + }, +} + +/// Worker loop for storage trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn storage_worker_loop<Factory>( view: ConsistentDbView<Factory>, - /// Proof task context shared across all proof tasks task_ctx: ProofTaskCtx, - /// Proof tasks pending execution - pending_tasks: VecDeque<ProofTaskKind>, - /// The underlying handle from which to spawn proof tasks - executor: Handle, - /// The proof task transactions, containing owned cursor factories that are reused for proof - /// calculation. - proof_task_txs: Vec<ProofTaskTx<FactoryTx<Factory>>>, - /// A receiver for new proof tasks. - proof_task_rx: Receiver<ProofTaskMessage<FactoryTx<Factory>>>, - /// A sender for sending back transactions. - tx_sender: Sender<ProofTaskMessage<FactoryTx<Factory>>>, - /// The number of active handles. - /// - /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in - /// [`ProofTaskManagerHandle::drop`]. - active_handles: Arc<AtomicUsize>, - /// Metrics tracking blinded node fetches. 
+ work_rx: CrossbeamReceiver<StorageWorkerJob>, + worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, +) where + Factory: DatabaseProviderFactory<Provider: BlockReader>, +{ + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Storage worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Storage worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. + let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + StorageWorkerJob::StorageProof { input, result_sender } => { + let hashed_address = input.hashed_address; + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots = input.target_slots.len(), + "Processing storage proof" + ); + + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof( + input, + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ); + + let proof_elapsed = proof_start.elapsed(); + storage_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Storage proof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); + } + + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); + + let start = Instant::now(); + let result = + blinded_provider_factory.storage_node_provider(account).trie_node(&path); + let elapsed = start.elapsed(); + + storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } + } + } + + tracing::debug!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); + #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics, + metrics.record_storage_nodes(storage_nodes_processed as usize); }   -impl<Factory: DatabaseProviderFactory> ProofTaskManager<Factory> { - /// Creates a new [`ProofTaskManager`] with the given max concurrency, creating that number of - /// cursor factories. - /// - /// Returns an error if the consistent view provider fails to create a read-only transaction. 
- pub fn new( - executor: Handle, - view: ConsistentDbView<Factory>, - task_ctx: ProofTaskCtx, - max_concurrency: usize, - ) -> Self { - let (tx_sender, proof_task_rx) = channel(); - Self { - max_concurrency, - total_transactions: 0, - view, - task_ctx, - pending_tasks: VecDeque::new(), - executor, - proof_task_txs: Vec::new(), - proof_task_rx, - tx_sender, - active_handles: Arc::new(AtomicUsize::new(0)), - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics::default(), +/// Worker loop for account trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn account_worker_loop<Factory>( + view: ConsistentDbView<Factory>, + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver<AccountWorkerJob>, + storage_work_tx: CrossbeamSender<StorageWorkerJob>, + worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, +) where + Factory: DatabaseProviderFactory<Provider: BlockReader>, +{ + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Account worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. 
+ let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + targets = input.targets.len(), + "Processing account multiproof" + ); + + let proof_start = Instant::now(); + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = + std::mem::take(&mut input.prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = StorageRootTargets::count( + &input.prefix_sets.account_prefix_set, + &storage_prefix_sets, + ); + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &input.targets, + &mut storage_prefix_sets, + input.collect_branch_node_masks, + input.multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + let _ = result_sender.send(Err(error)); + continue; + } + }; + + // Use the missed leaves cache passed from the multiproof manager + let missed_leaves_storage_roots = &input.missed_leaves_storage_roots; + + let account_prefix_set = std::mem::take(&mut input.prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &input.targets, + prefix_set: account_prefix_set, + collect_branch_node_masks: input.collect_branch_node_masks, + multi_added_removed_keys: input.multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots, + }; + + let result = build_account_multiproof_with_storage_roots( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ctx, + &mut tracker, + ); + + let proof_elapsed = proof_start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| (proof, stats)); + account_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + proof_time_us = proof_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); + } + + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + "Processing blinded account node" + ); + + let start = Instant::now(); + let result = blinded_provider_factory.account_node_provider().trie_node(&path); + let elapsed = start.elapsed(); + + account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?path, + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } } }   - /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. 
- pub fn handle(&self) -> ProofTaskManagerHandle<FactoryTx<Factory>> { - ProofTaskManagerHandle::new(self.tx_sender.clone(), self.active_handles.clone()) - } + tracing::debug!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); + + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); }   -impl<Factory> ProofTaskManager<Factory> +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots<C, H>( + trie_cursor_factory: C, + hashed_cursor_factory: H, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result<DecodedMultiProof, ParallelStateRootError> where - Factory: DatabaseProviderFactory<Provider: BlockReader> + 'static, + C: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, { - /// Inserts the task into the pending tasks queue. - pub fn queue_proof_task(&mut self, task: ProofTaskKind) { - self.pending_tasks.push_back(task); - } + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts());   - /// Gets either the next available transaction, or creates a new one if all are in use and the - /// total number of transactions created is less than the max concurrency. - pub fn get_or_create_tx(&mut self) -> ProviderResult<Option<ProofTaskTx<FactoryTx<Factory>>>> { - if let Some(proof_task_tx) = self.proof_task_txs.pop() { - return Ok(Some(proof_task_tx)); - } + // Create the walker. + let walker = TrieWalker::<_>::state_trie( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + ctx.prefix_set, + ) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true);   - // if we can create a new tx within our concurrency limits, create one on-demand - if self.total_transactions < self.max_concurrency { - let provider_ro = self.view.provider_ro()?; - let tx = provider_ro.into_tx(); - self.total_transactions += 1; - return Ok(Some(ProofTaskTx::new(tx, self.task_ctx.clone(), self.total_transactions))); - } + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::<ProofRetainer>() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks);   - Ok(None) - } + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. 
+ let mut collected_decoded_storages: B256Map<DecodedStorageMultiProof> = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::state_trie( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + );   - /// Spawns the next queued proof task on the executor with the given input, if there are any - /// transactions available. - /// - /// This will return an error if a transaction must be created on-demand and the consistent view - /// provider fails. - pub fn try_spawn_next(&mut self) -> ProviderResult<()> { - let Some(task) = self.pending_tasks.pop_front() else { return Ok(()) }; + let mut storage_proof_receivers = ctx.storage_proof_receivers;   - let Some(proof_task_tx) = self.get_or_create_tx()? else { - // if there are no txs available, requeue the proof task - self.pending_tasks.push_front(task); - return Ok(()) - }; - - let tx_sender = self.tx_sender.clone(); - self.executor.spawn_blocking(move || match task { - ProofTaskKind::StorageProof(input, sender) => { - proof_task_tx.storage_proof(input, sender, tx_sender); + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } - ProofTaskKind::BlindedAccountNode(path, sender) => { - proof_task_tx.blinded_account_node(path, sender, tx_sender); - } - ProofTaskKind::BlindedStorageNode(account, path, sender) => { - proof_task_tx.blinded_storage_node(account, path, sender, tx_sender); - } - }); + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })??; + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves();   - Ok(()) - } + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root;   - /// Loops, managing the proof tasks, and sending new tasks to the executor. 
- pub fn run(mut self) -> ProviderResult<()> { - loop { - match self.proof_task_rx.recv() { - Ok(message) => match message { - ProofTaskMessage::QueueTask(task) => { - // Track metrics for blinded node requests - #[cfg(feature = "metrics")] - match &task { - ProofTaskKind::BlindedAccountNode(_, _) => { - self.metrics.account_nodes += 1; + vac.insert(root); + root } - ProofTaskKind::BlindedStorageNode(_, _, _) => { - self.metrics.storage_nodes += 1; - } - _ => {} } - // queue the task - self.queue_proof_task(task) - } - ProofTaskMessage::Transaction(tx) => { - // return the transaction to the pool - self.proof_task_txs.push(tx); } - ProofTaskMessage::Terminate => { - // Record metrics before terminating - #[cfg(feature = "metrics")] - self.metrics.record(); - return Ok(()) - } - }, - // All senders are disconnected, so we can terminate - // However this should never happen, as this struct stores a sender - Err(_) => return Ok(()), - }; + };   - // try spawning the next task - self.try_spawn_next()?; + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } } } + + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(Ok(proof)) = receiver.recv() { + collected_decoded_storages.insert(hashed_address, proof); + } + } + + let _ = hash_builder.root(); + + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; + + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; + + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) +} + +/// Queues storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. +fn dispatch_storage_proofs( + storage_work_tx: &CrossbeamSender<StorageWorkerJob>, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map<PrefixSet>, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc<MultiAddedRemovedKeys>>, +) -> Result<B256Map<Receiver<StorageProofResult>>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + + // Queue all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + + // Always queue a storage proof so we obtain the storage root even when no slots are + // requested. 
+ let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + let (sender, receiver) = channel(); + + // If queuing fails, propagate error up (no fallback) + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, result_sender: sender }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) + })?; + + storage_proof_receivers.insert(*hashed_address, receiver); + } + + Ok(storage_proof_receivers) }   /// Type alias for the factory tuple returned by `create_factories` @@ -229,8 +636,7 @@ /// Trie updates, prefix sets, and state updates task_ctx: ProofTaskCtx,   - /// Identifier for the tx within the context of a single [`ProofTaskManager`], used only for - /// tracing. + /// Identifier for the worker within the worker pool, used only for tracing. id: usize, }   @@ -246,6 +652,7 @@ impl<Tx> ProofTaskTx<Tx> where Tx: DbTx, { + #[inline] fn create_factories(&self) -> ProofFactories<'_, Tx> { let trie_cursor_factory = InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(&self.tx), @@ -260,172 +667,70 @@ (trie_cursor_factory, hashed_cursor_factory) }   - /// Calculates a storage proof for the given hashed address, and desired prefix set. - fn storage_proof( - self, + /// Compute storage proof with pre-created factories. + /// + /// Accepts cursor factories as parameters to allow reuse across multiple proofs. + /// Used by storage workers in the worker pool to avoid factory recreation + /// overhead on each proof computation. + #[inline] + fn compute_storage_proof( + &self, input: StorageProofInput, - result_sender: Sender<StorageProofResult>, - tx_sender: Sender<ProofTaskMessage<Tx>>, - ) { - trace!( - target: "trie::proof_task", - hashed_address=?input.hashed_address, - "Starting storage proof task calculation" - ); + trie_cursor_factory: impl TrieCursorFactory, + hashed_cursor_factory: impl HashedCursorFactory, + ) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. + let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input;   - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let multi_added_removed_keys = input - .multi_added_removed_keys - .unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&input.hashed_address); + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address);   let span = tracing::trace_span!( target: "trie::proof_task", "Storage proof calculation", - hashed_address=?input.hashed_address, - // Add a unique id because we often have parallel storage proof calculations for the - // same hashed address, and we want to differentiate them during trace analysis. 
- span_id=self.id, + hashed_address = ?hashed_address, + worker_id = self.id, ); - let span_guard = span.enter(); + let _span_guard = span.enter();   - let target_slots_len = input.target_slots.len(); let proof_start = Instant::now();   - let raw_proof_result = StorageProof::new_hashed( - trie_cursor_factory, - hashed_cursor_factory, - input.hashed_address, - ) - .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied())) - .with_branch_node_masks(input.with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(input.target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(trie_cursor_factory, hashed_cursor_factory, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string()));   - drop(span_guard); - + // Decode proof into DecodedStorageMultiProof let decoded_result = raw_proof_result.and_then(|raw_proof| { raw_proof.try_into().map_err(|e: alloy_rlp::Error| { ParallelStateRootError::Other(format!( "Failed to decode storage proof for {}: {}", - input.hashed_address, e + hashed_address, e )) }) });   trace!( target: "trie::proof_task", - hashed_address=?input.hashed_address, - prefix_set = ?input.prefix_set.len(), - target_slots = ?target_slots_len, - proof_time = ?proof_start.elapsed(), - "Completed storage proof task calculation" + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" );   - // send the result back - if let Err(error) = result_sender.send(decoded_result) { - debug!( - target: "trie::proof_task", - hashed_address = ?input.hashed_address, - ?error, - task_time = ?proof_start.elapsed(), - "Storage proof receiver is dropped, discarding the result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } - - /// Retrieves blinded account node by path. - fn blinded_account_node( - self, - path: Nibbles, - result_sender: Sender<TrieNodeProviderResult>, - tx_sender: Sender<ProofTaskMessage<Tx>>, - ) { - trace!( - target: "trie::proof_task", - ?path, - "Starting blinded account node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().trie_node(&path); - trace!( - target: "trie::proof_task", - ?path, - elapsed = ?start.elapsed(), - "Completed blinded account node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?path, - ?error, - "Failed to send blinded account node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } - - /// Retrieves blinded storage node of the given account by path. 
- fn blinded_storage_node( - self, - account: B256, - path: Nibbles, - result_sender: Sender<TrieNodeProviderResult>, - tx_sender: Sender<ProofTaskMessage<Tx>>, - ) { - trace!( - target: "trie::proof_task", - ?account, - ?path, - "Starting blinded storage node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); - trace!( - target: "trie::proof_task", - ?account, - ?path, - elapsed = ?start.elapsed(), - "Completed blinded storage node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?account, - ?path, - ?error, - "Failed to send blinded storage node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); + decoded_result } }   @@ -464,6 +769,56 @@ } } }   +/// Input parameters for account multiproof computation. +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc<DashMap<B256, B256>>, +} + +/// Parameters for building an account multiproof with pre-computed storage roots. +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc<MultiAddedRemovedKeys>>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map<Receiver<StorageProofResult>>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap<B256, B256>, +} + +/// Internal message for account workers. +#[derive(Debug)] +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: AccountMultiproofInput, + /// Channel to send result back to original caller + result_sender: Sender<AccountMultiproofResult>, + }, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender<TrieNodeProviderResult>, + }, +} + /// Data used for initializing cursor factories that is shared across all storage proof instances. #[derive(Debug, Clone)] pub struct ProofTaskCtx { @@ -489,121 +844,272 @@ Self { nodes_sorted, state_sorted, prefix_sets } } }   -/// Message used to communicate with [`ProofTaskManager`]. -#[derive(Debug)] -pub enum ProofTaskMessage<Tx> { - /// A request to queue a proof task. 
- QueueTask(ProofTaskKind), - /// A returned database transaction. - Transaction(ProofTaskTx<Tx>), - /// A request to terminate the proof task manager. - Terminate, +/// A handle that provides type-safe access to proof worker pools. +/// +/// The handle stores direct senders to both storage and account worker pools, +/// eliminating the need for a routing thread. All handles share reference-counted +/// channels, and workers shut down gracefully when all handles are dropped. +#[derive(Debug, Clone)] +pub struct ProofWorkerHandle { + /// Direct sender to storage worker pool + storage_work_tx: CrossbeamSender<StorageWorkerJob>, + /// Direct sender to account worker pool + account_work_tx: CrossbeamSender<AccountWorkerJob>, }   -/// Proof task kind. -/// -/// When queueing a task using [`ProofTaskMessage::QueueTask`], this enum -/// specifies the type of proof task to be executed. -#[derive(Debug)] -pub enum ProofTaskKind { - /// A storage proof request. - StorageProof(StorageProofInput, Sender<StorageProofResult>), - /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender<TrieNodeProviderResult>), - /// A blinded storage node request. - BlindedStorageNode(B256, Nibbles, Sender<TrieNodeProviderResult>), -} +impl ProofWorkerHandle { + /// Spawns storage and account worker pools with dedicated database transactions. + /// + /// Returns a handle for submitting proof tasks to the worker pools. + /// Workers run until the last handle is dropped. + /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `view`: Consistent database view for creating transactions + /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `storage_worker_count`: Number of storage workers to spawn + /// - `account_worker_count`: Number of account workers to spawn + pub fn new<Factory>( + executor: Handle, + view: ConsistentDbView<Factory>, + task_ctx: ProofTaskCtx, + storage_worker_count: usize, + account_worker_count: usize, + ) -> Self + where + Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, + { + let (storage_work_tx, storage_work_rx) = unbounded::<StorageWorkerJob>(); + let (account_work_tx, account_work_rx) = unbounded::<AccountWorkerJob>(); + + tracing::debug!( + target: "trie::proof_task", + storage_worker_count, + account_worker_count, + "Spawning proof worker pools" + ); + + // Spawn storage workers + for worker_id in 0..storage_worker_count { + let view_clone = view.clone(); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = storage_work_rx.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + storage_worker_loop( + view_clone, + task_ctx_clone, + work_rx_clone, + worker_id, + #[cfg(feature = "metrics")] + metrics, + ) + });   -/// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the -/// number of active handles went to zero. -#[derive(Debug)] -pub struct ProofTaskManagerHandle<Tx> { - /// The sender for the proof task manager. - sender: Sender<ProofTaskMessage<Tx>>, - /// The number of active handles. - active_handles: Arc<AtomicUsize>, -} + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Storage worker spawned successfully" + ); + }   -impl<Tx> ProofTaskManagerHandle<Tx> { - /// Creates a new [`ProofTaskManagerHandle`] with the given sender. 
- pub fn new(sender: Sender<ProofTaskMessage<Tx>>, active_handles: Arc<AtomicUsize>) -> Self { - active_handles.fetch_add(1, Ordering::SeqCst); - Self { sender, active_handles } + // Spawn account workers + for worker_id in 0..account_worker_count { + let view_clone = view.clone(); + let task_ctx_clone = task_ctx.clone(); + let work_rx_clone = account_work_rx.clone(); + let storage_work_tx_clone = storage_work_tx.clone(); + + executor.spawn_blocking(move || { + #[cfg(feature = "metrics")] + let metrics = ProofTaskTrieMetrics::default(); + + account_worker_loop( + view_clone, + task_ctx_clone, + work_rx_clone, + storage_work_tx_clone, + worker_id, + #[cfg(feature = "metrics")] + metrics, + ) + }); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Account worker spawned successfully" + ); + } + + Self::new_handle(storage_work_tx, account_work_tx) }   - /// Queues a task to the proof task manager. - pub fn queue_task(&self, task: ProofTaskKind) -> Result<(), SendError<ProofTaskMessage<Tx>>> { - self.sender.send(ProofTaskMessage::QueueTask(task)) + /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools. + /// + /// This is an internal constructor used for creating handles. + const fn new_handle( + storage_work_tx: CrossbeamSender<StorageWorkerJob>, + account_work_tx: CrossbeamSender<AccountWorkerJob>, + ) -> Self { + Self { storage_work_tx, account_work_tx } }   - /// Terminates the proof task manager. - pub fn terminate(&self) { - let _ = self.sender.send(ProofTaskMessage::Terminate); + /// Queue a storage proof computation + pub fn queue_storage_proof( + &self, + input: StorageProofInput, + ) -> Result<Receiver<StorageProofResult>, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::StorageProof { input, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?; + + Ok(rx) } -} + + /// Queue an account multiproof computation + pub fn dispatch_account_multiproof( + &self, + input: AccountMultiproofInput, + ) -> Result<Receiver<AccountMultiproofResult>, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::AccountMultiproof { input, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?; + + Ok(rx) + } + + /// Internal: Queue blinded storage node request + fn queue_blinded_storage_node( + &self, + account: B256, + path: Nibbles, + ) -> Result<Receiver<TrieNodeProviderResult>, ProviderError> { + let (tx, rx) = channel(); + self.storage_work_tx + .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("storage workers unavailable")) + })?;   -impl<Tx> Clone for ProofTaskManagerHandle<Tx> { - fn clone(&self) -> Self { - Self::new(self.sender.clone(), self.active_handles.clone()) + Ok(rx) } -} + + /// Internal: Queue blinded account node request + fn queue_blinded_account_node( + &self, + path: Nibbles, + ) -> Result<Receiver<TrieNodeProviderResult>, ProviderError> { + let (tx, rx) = channel(); + self.account_work_tx + .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx }) + .map_err(|_| { + ProviderError::other(std::io::Error::other("account workers unavailable")) + })?;   -impl<Tx> Drop for ProofTaskManagerHandle<Tx> { - fn drop(&mut self) { - // Decrement the number of active handles and terminate the manager if it was the last - // handle. 
- if self.active_handles.fetch_sub(1, Ordering::SeqCst) == 1 { - self.terminate(); - } + Ok(rx) } }   -impl<Tx: DbTx> TrieNodeProviderFactory for ProofTaskManagerHandle<Tx> { - type AccountNodeProvider = ProofTaskTrieNodeProvider<Tx>; - type StorageNodeProvider = ProofTaskTrieNodeProvider<Tx>; +impl TrieNodeProviderFactory for ProofWorkerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider;   fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } + ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() } }   fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() } + ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() } } }   /// Trie node provider for retrieving trie nodes by path. #[derive(Debug)] -pub enum ProofTaskTrieNodeProvider<Tx> { +pub enum ProofTaskTrieNodeProvider { /// Blinded account trie node provider. AccountNode { - /// Sender to the proof task. - sender: Sender<ProofTaskMessage<Tx>>, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, }, /// Blinded storage trie node provider. StorageNode { /// Target account. account: B256, - /// Sender to the proof task. - sender: Sender<ProofTaskMessage<Tx>>, + /// Handle to the proof worker pools. + handle: ProofWorkerHandle, }, }   -impl<Tx: DbTx> TrieNodeProvider for ProofTaskTrieNodeProvider<Tx> { +impl TrieNodeProvider for ProofTaskTrieNodeProvider { fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> { - let (tx, rx) = channel(); match self { - Self::AccountNode { sender } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedAccountNode(*path, tx), - )); + Self::AccountNode { handle } => { + let rx = handle + .queue_blinded_account_node(*path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } - Self::StorageNode { sender, account } => { - let _ = sender.send(ProofTaskMessage::QueueTask( - ProofTaskKind::BlindedStorageNode(*account, *path, tx), - )); + Self::StorageNode { handle, account } => { + let rx = handle + .queue_blinded_storage_node(*account, *path) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))? } } + } +}   - rx.recv().unwrap() +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::map::B256Map; + use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; + use reth_trie_common::{ + prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, HashedAccountsSorted, + HashedPostStateSorted, + }; + use std::sync::Arc; + use tokio::{runtime::Builder, task}; + + fn test_ctx() -> ProofTaskCtx { + ProofTaskCtx::new( + Arc::new(TrieUpdatesSorted::default()), + Arc::new(HashedPostStateSorted::new( + HashedAccountsSorted::default(), + B256Map::default(), + )), + Arc::new(TriePrefixSetsMut::default()), + ) + } + + /// Ensures `ProofWorkerHandle::new` spawns workers correctly. 
+ #[test] + fn spawn_proof_workers_creates_handle() { + let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); + runtime.block_on(async { + let handle = tokio::runtime::Handle::current(); + let factory = create_test_provider_factory(); + let view = ConsistentDbView::new(factory, None); + let ctx = test_ctx(); + + let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); + + // Verify handle can be cloned + let _cloned_handle = proof_handle.clone(); + + // Workers shut down automatically when handle is dropped + drop(proof_handle); + task::yield_now().await; + }); } }
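The hunks above replace the old `ProofTaskManager` routing thread with a `ProofWorkerHandle` that owns direct crossbeam senders into dedicated storage and account worker pools. Below is a minimal sketch of the new lifecycle, modeled on the test added in the diff. It assumes the types are re-exported under `reth_trie_parallel::proof_task` (the module path is not visible in the diff) and uses reth's test provider factory purely for illustration; it is not the author's code.

```rust
use std::sync::Arc;

use alloy_primitives::map::B256Map;
use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory};
use reth_trie_common::{
    prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, HashedAccountsSorted,
    HashedPostStateSorted,
};
use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle};

#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn main() {
    // Shared context for all workers: empty trie updates, hashed post state and prefix sets.
    let task_ctx = ProofTaskCtx::new(
        Arc::new(TrieUpdatesSorted::default()),
        Arc::new(HashedPostStateSorted::new(HashedAccountsSorted::default(), B256Map::default())),
        Arc::new(TriePrefixSetsMut::default()),
    );

    // Consistent database view over a (test) provider factory, as in the diff's own test.
    let factory = create_test_provider_factory();
    let view = ConsistentDbView::new(factory, None);

    // Spawn 4 storage workers and 2 account workers as blocking tasks on this runtime.
    let handle = ProofWorkerHandle::new(tokio::runtime::Handle::current(), view, task_ctx, 4, 2);

    // Handles are cheap to clone; every clone sends work straight into the same
    // crossbeam channels (no routing thread). Callers submit jobs via
    // `queue_storage_proof` / `dispatch_account_multiproof` and block on the
    // returned std receiver.
    let cloned = handle.clone();

    // Workers shut down gracefully once the last handle is dropped,
    // replacing the explicit `Terminate` message of the old manager.
    drop(cloned);
    drop(handle);
}
```

Compared to the removed `ProofTaskManagerHandle`, there is no `active_handles` counter and no terminate-on-drop bookkeeping: channel closure alone signals shutdown to the worker loops.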
diff --git reth/crates/trie/parallel/src/proof_task_metrics.rs scroll-reth/crates/trie/parallel/src/proof_task_metrics.rs index cdb59d078d83b472a9c2f94a8c82b0f2c3be8345..6492e28d12dffb0ccecf384779b8d15f7145ab49 100644 --- reth/crates/trie/parallel/src/proof_task_metrics.rs +++ scroll-reth/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,24 +1,5 @@ use reth_metrics::{metrics::Histogram, Metrics};   -/// Metrics for blinded node fetching for the duration of the proof task manager. -#[derive(Clone, Debug, Default)] -pub struct ProofTaskMetrics { - /// The actual metrics for blinded nodes. - pub task_metrics: ProofTaskTrieMetrics, - /// Count of blinded account node requests. - pub account_nodes: usize, - /// Count of blinded storage node requests. - pub storage_nodes: usize, -} - -impl ProofTaskMetrics { - /// Record the blinded node counts into the histograms. - pub fn record(&self) { - self.task_metrics.record_account_nodes(self.account_nodes); - self.task_metrics.record_storage_nodes(self.storage_nodes); - } -} - /// Metrics for the proof task. #[derive(Clone, Metrics)] #[metrics(scope = "trie.proof_task")]
diff --git reth/crates/trie/parallel/src/storage_root_targets.rs scroll-reth/crates/trie/parallel/src/storage_root_targets.rs index f844b70fca54667fa5762aab49b679719a6225e3..0c6d9f43498265a1ce09dab95923ff5b12400e89 100644 --- reth/crates/trie/parallel/src/storage_root_targets.rs +++ scroll-reth/crates/trie/parallel/src/storage_root_targets.rs @@ -24,6 +24,23 @@ .chain(storage_prefix_sets) .collect(), ) } + + /// Returns the total number of unique storage root targets without allocating new maps. + pub fn count( + account_prefix_set: &PrefixSet, + storage_prefix_sets: &B256Map<PrefixSet>, + ) -> usize { + let mut count = storage_prefix_sets.len(); + + for nibbles in account_prefix_set { + let hashed_address = B256::from_slice(&nibbles.pack()); + if !storage_prefix_sets.contains_key(&hashed_address) { + count += 1; + } + } + + count + } }   impl IntoIterator for StorageRootTargets {
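The new `StorageRootTargets::count` computes the size of the union of account-prefix-set targets and storage-prefix-set keys without allocating the merged map that `new` builds. The idea, reduced to plain std collections (the real code packs trie nibbles into `B256` keys; that detail is elided here), is a dedup count:

```rust
// Illustrative only: count the union of two key sets without materializing it.
use std::collections::{HashMap, HashSet};

fn count_targets(
    accounts_with_trie_changes: &HashSet<[u8; 32]>,
    storage_changes: &HashMap<[u8; 32], ()>,
) -> usize {
    // Start with every account that has storage changes...
    let mut count = storage_changes.len();
    // ...and add accounts with trie changes only if they are not already counted.
    for account in accounts_with_trie_changes {
        if !storage_changes.contains_key(account) {
            count += 1;
        }
    }
    count
}

fn main() {
    let (a, b, c) = ([0x11u8; 32], [0x22u8; 32], [0x33u8; 32]);
    let accounts: HashSet<_> = [a, b].into_iter().collect();
    let storages: HashMap<_, _> = [(b, ()), (c, ())].into_iter().collect();
    // `b` appears on both sides but is only counted once.
    assert_eq!(count_targets(&accounts, &storages), 3);
}
```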
diff --git reth/crates/trie/sparse-parallel/src/trie.rs scroll-reth/crates/trie/sparse-parallel/src/trie.rs index d973d705de2a15d38dcaa44de3c1689f7f7d2e85..50c9a79bd0537d4646e955df346eda8d7a8ea0f4 100644 --- reth/crates/trie/sparse-parallel/src/trie.rs +++ scroll-reth/crates/trie/sparse-parallel/src/trie.rs @@ -623,51 +623,19 @@ ?remaining_child_path, "Branch node has only one child", );   - let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path); - // If the remaining child node is not yet revealed then we have to reveal it here, // otherwise it's not possible to know how to collapse the branch. - let remaining_child_node = - match remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() { - SparseNode::Hash(_) => { - debug!( - target: "trie::parallel_sparse", - child_path = ?remaining_child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&remaining_child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::parallel_sparse", - ?remaining_child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - remaining_child_subtrie.reveal_node( - remaining_child_path, - &decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() - } else { - return Err(SparseTrieErrorKind::NodeNotFoundInProvider { - path: remaining_child_path, - } - .into()) - } - } - node => node, - }; + let remaining_child_node = self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_child_path, + true, // recurse_into_extension + )?;   let (new_branch_node, remove_child) = Self::branch_changes_on_leaf_removal( branch_path, &remaining_child_path, - remaining_child_node, + &remaining_child_node, );   if remove_child { @@ -1226,6 +1194,90 @@ } // For a branch node, we just leave the extension node as-is. SparseNode::Branch { .. } => None, } + } + + /// Called when a leaf is removed on a branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal<P: TrieNodeProvider>( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult<SparseNode> { + let remaining_child_subtrie = self.subtrie_for_path_mut(remaining_child_path); + + let remaining_child_node = + match remaining_child_subtrie.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + remaining_child_subtrie.reveal_node( + *remaining_child_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. } = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) }   /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to @@ -4077,6 +4129,185 @@ ); }   #[test] + fn test_remove_leaf_remaining_extension_node_child_is_revealed() { + let branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]); + let removed_branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2]); + + // Convert the logs into reveal_nodes call on a fresh ParallelSparseTrie + let nodes = vec![ + // Branch at 0x4f8807 + RevealedSparseNode { + path: branch_path, + node: { + TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::from(hex!( + "dede882d52f0e0eddfb5b89293a10c87468b4a73acd0d4ae550054a92353f6d5" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "8746f18e465e2eed16117306b6f2eef30bc9d2978aee4a7838255e39c41a3222" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "35a4ea861548af5f0262a9b6d619b4fc88fce6531cbd004eab1530a73f34bbb1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "47d5c2bf9eea5c1ee027e4740c2b86159074a27d52fd2f6a8a8c86c77e48006f" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "eb76a359b216e1d86b1f2803692a9fe8c3d3f97a9fe6a82b396e30344febc0c1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "437656f2697f167b23e33cb94acc8550128cfd647fc1579d61e982cb7616b8bc" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "45a1ac2faf15ea8a4da6f921475974e0379f39c3d08166242255a567fa88ce6c" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7dbb299d714d3dfa593f53bc1b8c66d5c401c30a0b5587b01254a56330361395" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "ae407eb14a74ed951c9949c1867fb9ee9ba5d5b7e03769eaf3f29c687d080429" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "768d0fe1003f0e85d3bc76e4a1fa0827f63b10ca9bca52d56c2b1cceb8eb8b08" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "e5127935143493d5094f4da6e4f7f5a0f62d524fbb61e7bb9fb63d8a166db0f3" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7f3698297308664fbc1b9e2c41d097fbd57d8f364c394f6ad7c71b10291fbf42" + ))), + 
RlpNode::word_rlp(&B256::from(hex!( + "4a2bc7e19cec63cb5ef5754add0208959b50bcc79f13a22a370f77b277dbe6db" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "40764b8c48de59258e62a3371909a107e76e1b5e847cfa94dbc857e9fd205103" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "2985dca29a7616920d95c43ab62eb013a40e6a0c88c284471e4c3bd22f3b9b25" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "1b6511f7a385e79477239f7dd4a49f52082ecac05aa5bd0de18b1d55fe69d10c" + ))), + ], + TrieMask::new(0b1111111111111111), + )) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b0011110100100101)), + }, + }, + // Branch at 0x4f88072 + RevealedSparseNode { + path: removed_branch_path, + node: { + let stack = vec![ + RlpNode::word_rlp(&B256::from(hex!( + "15fd4993a41feff1af3b629b32572ab05acddd97c681d82ec2eb89c8a8e3ab9e" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "a272b0b94ced4e6ec7adb41719850cf4a167ad8711d0dda6a810d129258a0d94" + ))), + ]; + let branch_node = BranchNode::new(stack, TrieMask::new(0b0001000000000100)); + TrieNode::Branch(branch_node) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b0000000000000000)), + tree_mask: Some(TrieMask::new(0b0000000000000100)), + }, + }, + // Extension at 0x4f880722 + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2]), + node: { + let extension_node = ExtensionNode::new( + Nibbles::from_nibbles([0x6]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + ); + TrieNode::Extension(extension_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + // Leaf at 0x4f88072c + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc]), + node: { + let leaf_node = LeafNode::new( + Nibbles::from_nibbles([ + 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, 0x0, 0x8, 0x8, 0xd, 0xf, + 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, 0xf, 0xa, + 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, + 0xd, 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]), + hex::decode("8468d3971d").unwrap(), + ); + TrieNode::Leaf(leaf_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + ]; + + // Create a fresh ParallelSparseTrie + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Extension(ExtensionNode::new( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + )), + TrieMasks::none(), + true, + ) + .unwrap(); + + // Call reveal_nodes + trie.reveal_nodes(nodes).unwrap(); + + // Remove the leaf at "0x4f88072c077f86613088dfcae648abe831fadca55ad43ab165d1680dd567b5d6" + let leaf_key = Nibbles::from_nibbles([ + 0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc, 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, + 0x0, 0x8, 0x8, 0xd, 0xf, 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, + 0xf, 0xa, 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, 0xd, + 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]); + + let mut provider = MockTrieNodeProvider::new(); + let revealed_branch = create_branch_node_with_children(&[], []); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2, 0x6]), + RevealedNode { + node: encoded.into(), + tree_mask: None, + // Give it a 
fake hashmask so that it appears like it will be stored in the db + hash_mask: Some(TrieMask::new(0b1111)), + }, + ); + + trie.remove_leaf(&leaf_key, provider).unwrap(); + + // Calculate root so that updates are calculated. + trie.root(); + + // Take updates and assert they are correct + let updates = trie.take_updates(); + assert_eq!( + updates.removed_nodes.into_iter().collect::<Vec<_>>(), + vec![removed_branch_path] + ); + assert_eq!(updates.updated_nodes.len(), 1); + let updated_node = updates.updated_nodes.get(&branch_path).unwrap(); + + // Second bit must be set, indicating that the extension's child is in the db + assert_eq!(updated_node.tree_mask, TrieMask::new(0b011110100100101),) + } + + #[test] fn test_parallel_sparse_trie_root() { // Step 1: Create the trie structure // Extension node at 0x with key 0x2 (goes to upper subtrie) @@ -4764,7 +4995,7 @@ run_hash_builder( state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::<Vec<_>>(), + state.keys().copied(), );   // Write trie updates to the database @@ -4809,7 +5040,7 @@ keys_to_delete .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::<Vec<_>>(), + state.keys().copied(), );   // Write trie updates to the database
diff --git reth/crates/trie/sparse/src/trie.rs scroll-reth/crates/trie/sparse/src/trie.rs index 76dadc8fc9c656c2a56e09b5b9043cd7676dff2b..89a23851e28076a0057f0314b8223abfe6aa79fb 100644 --- reth/crates/trie/sparse/src/trie.rs +++ scroll-reth/crates/trie/sparse/src/trie.rs @@ -821,38 +821,17 @@ child_path.push_unchecked(child_nibble);   trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child");   - if self.nodes.get(&child_path).unwrap().is_hash() { - debug!( - target: "trie::sparse", - ?child_path, - leaf_full_path = ?full_path, - "Branch node child not revealed in remove_leaf, falling back to db", - ); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.trie_node(&child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - self.reveal_node( - child_path, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - - // Get the only child node. - let child = self.nodes.get(&child_path).unwrap(); + // If the remaining child node is not yet revealed then we have to reveal + // it here, otherwise it's not possible to know how to collapse the branch. + let child = self.reveal_remaining_child_on_leaf_removal( + &provider, + full_path, + &child_path, + true, // recurse_into_extension + )?;   let mut delete_child = false; - let new_node = match child { + let new_node = match &child { SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), &SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { @@ -1254,6 +1233,87 @@ } }   Ok(nodes) + } + + /// Called when a leaf is removed on a branch which has only one other remaining child. That + /// child must be revealed in order to properly collapse the branch. + /// + /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its + /// child will be ensured to be revealed as well. + /// + /// ## Returns + /// + /// The node of the remaining child, whether it was already revealed or not. + fn reveal_remaining_child_on_leaf_removal<P: TrieNodeProvider>( + &mut self, + provider: P, + full_path: &Nibbles, // only needed for logs + remaining_child_path: &Nibbles, + recurse_into_extension: bool, + ) -> SparseTrieResult<SparseNode> { + let remaining_child_node = match self.nodes.get(remaining_child_path).unwrap() { + SparseNode::Hash(_) => { + debug!( + target: "trie::parallel_sparse", + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Node child not revealed in remove_leaf, falling back to db", + ); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(remaining_child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + self.reveal_node( + *remaining_child_path, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + self.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. 
This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. } = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) }   /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified @@ -2971,7 +3031,7 @@ run_hash_builder( state.clone(), trie_cursor.account_trie_cursor().unwrap(), Default::default(), - state.keys().copied().collect::<Vec<_>>(), + state.keys().copied(), );   // Write trie updates to the database @@ -3013,7 +3073,7 @@ keys_to_delete .iter() .map(|nibbles| B256::from_slice(&nibbles.pack())) .collect(), - state.keys().copied().collect::<Vec<_>>(), + state.keys().copied(), );   // Write trie updates to the database
diff --git reth/crates/trie/trie/Cargo.toml scroll-reth/crates/trie/trie/Cargo.toml index 403d187e46a9571cc95e2ab854637b5ecca2f7dc..baf41712be8fecda819b18d1a419f893eef6514e 100644 --- reth/crates/trie/trie/Cargo.toml +++ scroll-reth/crates/trie/trie/Cargo.toml @@ -18,7 +18,7 @@ reth-primitives-traits.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie-sparse.workspace = true -reth-trie-common = { workspace = true, features = ["rayon"] } +reth-trie-common.workspace = true   revm-database.workspace = true   @@ -65,6 +65,8 @@ proptest-arbitrary-interop.workspace = true proptest.workspace = true   [features] +default = ["rayon"] +rayon = ["reth-trie-common/rayon"] metrics = ["reth-metrics", "dep:metrics"] serde = [ "alloy-primitives/serde", @@ -91,6 +93,7 @@ "reth-trie-common/test-utils", "reth-ethereum-primitives/test-utils", "reth-trie-sparse/test-utils", "reth-stages-types/test-utils", + "reth-primitives-traits/test-utils", ]   [[bench]]
diff --git reth/crates/trie/trie/src/verify.rs scroll-reth/crates/trie/trie/src/verify.rs index 5f2260bc7dc66761330f0c581ca5fbe3b6b772cd..96059211458dae39330c9c9bbc40810fdf21b1cf 100644 --- reth/crates/trie/trie/src/verify.rs +++ scroll-reth/crates/trie/trie/src/verify.rs @@ -400,9 +400,8 @@ // If there was a previous storage account, and it is the final one, then we // need to validate that all accounts coming after it have empty storages. let prev_account = *prev_account;   - // Calculate the max possible account address. - let mut max_account = B256::ZERO; - max_account.reverse(); + // Calculate the max possible account address (all bits set). + let max_account = B256::from([0xFFu8; 32]);   self.verify_empty_storages(prev_account, max_account, false, true)?; }
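The `verify.rs` hunk fixes an actual bug rather than a style issue: reversing the bytes of `B256::ZERO` is a no-op, so the "maximum account address" used for the final empty-storage sweep was still zero. A standalone check of that premise (not taken from the repository) is below; `B256` here is alloy's 32-byte fixed array, which dereferences to `[u8; 32]` and is ordered lexicographically.

```rust
use alloy_primitives::B256;

fn main() {
    // Old computation: all-zero word, then reverse the byte array in place.
    let mut old_max = B256::ZERO;
    old_max.reverse();
    // Reversing zeroes changes nothing, so the "max" bound was actually the minimum.
    assert_eq!(old_max, B256::ZERO);

    // New computation: every bit set, i.e. the largest possible account hash.
    let new_max = B256::from([0xFFu8; 32]);
    assert!(new_max > old_max);
}
```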
diff --git reth/.config/zepter.yaml scroll-reth/.config/zepter.yaml index b754d06a062c0a45dcbd02ea103ac05ccaa43f55..3fb82bec823f2ef4cb5a1c6f37478212de9560ed 100644 --- reth/.config/zepter.yaml +++ scroll-reth/.config/zepter.yaml @@ -8,14 +8,14 @@ # The examples in the following comments assume crate `A` to have a dependency on crate `B`. workflows: check: - [ - "lint", - # Check that `A` activates the features of `B`. - "propagate-feature", - # These are the features to check: - "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat", - # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. - "--left-side-feature-missing=ignore", - # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. + "lint", + # Check that `A` activates the features of `B`. + "propagate-feature", + # These are the features to check: + "--features=std,op,scroll-alloy-traits,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat", + # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. + "--left-side-feature-missing=ignore", + # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on.   "--left-side-outside-workspace=ignore", # Auxiliary flags:
diff --git reth/.gitignore scroll-reth/.gitignore index a9b9f4768d53b8b82007f2ee9f87cfa8b87f8314..894ca5f346b71dfad74d9797c0da4bdd0bf44629 100644 --- reth/.gitignore +++ scroll-reth/.gitignore @@ -2,6 +2,7 @@ # Generated by Cargo # will have compiled files and executables ./debug/ target/ +datadir/   # These are backup files generated by rustfmt **/*.rs.bk
diff --git reth/Cargo.lock scroll-reth/Cargo.lock index 8350347b6b4fc0e2d189c796b7eb00aa36307f07..9c4746f0b38b1dab044f53eaece35ed2e8ae83de 100644 --- reth/Cargo.lock +++ scroll-reth/Cargo.lock @@ -112,9 +112,9 @@ ]   [[package]] name = "alloy-consensus" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "6a0dd3ed764953a6b20458b2b7abbfdc93d20d14b38babe1a70fe631a443a9f1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -139,9 +139,9 @@ ]   [[package]] name = "alloy-consensus-any" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "9556182afa73cddffa91e64a5aa9508d5e8c912b3a15f26998d2388a824d2c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -176,9 +176,9 @@ ]   [[package]] name = "alloy-dyn-abi" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6c2905bafc2df7ccd32ca3af13f0b0d82f2e2ff9dfbeb12196c0d978d5c0deb" +checksum = "3fdff496dd4e98a81f4861e66f7eaf5f2488971848bb42d9c892f871730245c8" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -237,9 +237,9 @@ ]   [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "305fa99b538ca7006b0c03cfed24ec6d82beda67aac857ef4714be24231d15e6" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -256,15 +256,15 @@ "ethereum_ssz", "ethereum_ssz_derive", "serde", "serde_with", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ]   [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "24a48fa6a4a5a69ae8e46c0ae60851602c5016baa3379d076c76e4c2f3b889f7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -299,9 +299,9 @@ ]   [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "4b16ee6b2c7d39da592d30a5f9607a83f50ee5ec2a2c301746cc81e91891f4ca" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -313,9 +313,9 @@ ]   [[package]] name = "alloy-json-abi" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2acb6637a9c0e1cdf8971e0ced8f3fa34c04c5e9dccf6bb184f6a64fe0e37d8" +checksum = "5513d5e6bd1cba6bdcf5373470f559f320c05c8c59493b6e98912fbe6733943f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -366,9 +366,9 @@ ]   [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" +checksum = "223612259a080160ce839a4e5df0125ca403a1d5e7206cc911cea54af5d769aa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,9 +379,9 @@ ]   [[package]] name = "alloy-op-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17aaeb600740c181bf29c9f138f9b228d115ea74fa6d0f0343e1952f1a766968" 
+checksum = "d1e0abe910a26d1b3686f4f6ad58287ce8c7fb85b08603d8c832869f02eb3d79" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,13 +392,14 @@ "auto_impl", "op-alloy-consensus", "op-revm", "revm", + "thiserror 2.0.16", ]   [[package]] name = "alloy-op-hardforks" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1d7dfbccb66603cb93fde00980d12848d32fe5e814f50562104a92df6487" +checksum = "af8bb236fc008fd3b83b2792e30ae79617a99ffc4c3f584f0c9b4ce0a2da52de" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -409,9 +410,9 @@ ]   [[package]] name = "alloy-primitives" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b77f7d5e60ad8ae6bd2200b8097919712a07a6db622a4b201e7ead6166f02e5" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" dependencies = [ "alloy-rlp", "arbitrary", @@ -654,9 +655,9 @@ ]   [[package]] name = "alloy-rpc-types-eth" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "6d7d47bca1a2a1541e4404aa38b7e262bb4dffd9ac23b4f178729a4ddc5a5caa" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -691,9 +692,9 @@ ]   [[package]] name = "alloy-rpc-types-trace" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444" +checksum = "c331c8e48665607682e8a9549a2347c13674d4fbcbdc342e7032834eba2424f4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -717,9 +718,9 @@ ]   [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "6a8468f1a7f9ee3bae73c24eead0239abea720dbf7779384b9c7e20d51bfb6b0" dependencies = [ "alloy-primitives", "arbitrary", @@ -763,9 +764,9 @@ ]   [[package]] name = "alloy-sol-macro" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c84c3637bee9b5c4a4d2b93360ee16553d299c3b932712353caf1cea76d0e6" +checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -777,9 +778,9 @@ ]   [[package]] name = "alloy-sol-macro-expander" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a882aa4e1790063362434b9b40d358942b188477ac1c44cfb8a52816ffc0cc17" +checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -795,9 +796,9 @@ ]   [[package]] name = "alloy-sol-macro-input" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e5772107f9bb265d8d8c86e0733937bb20d0857ea5425b1b6ddf51a9804042" +checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" dependencies = [ "const-hex", "dunce", @@ -811,9 +812,9 @@ ]   [[package]] name = "alloy-sol-type-parser" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e188b939aa4793edfaaa099cb1be4e620036a775b4bdf24fdc56f1cd6fd45890" +checksum = 
"954d1b2533b9b2c7959652df3076954ecb1122a28cc740aa84e7b0a49f6ac0a9" dependencies = [ "serde", "winnow", @@ -821,9 +822,9 @@ ]   [[package]] name = "alloy-sol-types" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c8a9a909872097caffc05df134e5ef2253a1cdb56d3a9cf0052a042ac763f9" +checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -862,7 +863,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78" dependencies = [ "alloy-json-rpc", + "alloy-rpc-types-engine", "alloy-transport", + "http-body-util", + "hyper", + "hyper-tls", + "hyper-util", + "jsonwebtoken", "reqwest", "serde_json", "tower", @@ -930,9 +937,9 @@ ]   [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "7bf39928a5e70c9755d6811a2928131b53ba785ad37c8bf85c90175b5d43b818" dependencies = [ "alloy-primitives", "darling 0.21.3", @@ -1673,15 +1680,6 @@ ]   [[package]] name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" @@ -1794,7 +1792,7 @@ "boa_macros", "hashbrown 0.15.5", "indexmap 2.11.4", "once_cell", - "phf", + "phf 0.11.3", "rustc-hash 2.1.1", "static_assertions", ] @@ -1885,7 +1883,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.9", + "sha2", "tinyvec", ]   @@ -2251,7 +2249,7 @@ "digest 0.10.7", "hmac", "k256", "serde", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ]   @@ -2267,7 +2265,7 @@ "hmac", "once_cell", "pbkdf2", "rand 0.8.5", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ]   @@ -2285,7 +2283,7 @@ "digest 0.10.7", "generic-array", "ripemd", "serde", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 1.0.69", ] @@ -2436,6 +2434,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", ]   [[package]] @@ -2939,7 +2947,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -2973,7 +2981,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.61.1", + "windows-sys 0.59.0", ]   [[package]] @@ -3093,7 +3101,7 @@ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2", "subtle", "zeroize", ] @@ -3187,6 +3195,14 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"   [[package]] +name = "encoder-standard" +version = "0.1.0" +source = "git+https://github.com/scroll-tech/da-codec#ef47e96e3a94f7eaa0772a8de2496b8eb7e0e5b9" +dependencies = [ + "zstd", +] + +[[package]] name = "enr" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3239,6 +3255,17 @@ "syn 2.0.106", ]   [[package]] +name = "enumn" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3251,7 +3278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.1", + "windows-sys 0.59.0", ]   [[package]] @@ -3271,7 +3298,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2", ]   [[package]] @@ -3489,6 +3516,7 @@ "reth-op", "reth-optimism-flashblocks", "reth-optimism-forks", "reth-payload-builder", + "reth-primitives-traits", "reth-rpc-api", "reth-rpc-engine-api", "revm", @@ -3865,6 +3893,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"   [[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] name = "form_urlencoded" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4500,6 +4543,22 @@ "webpki-roots 1.0.2", ]   [[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] name = "hyper-util" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4518,9 +4577,11 @@ "libc", "percent-encoding", "pin-project-lite", "socket2 0.6.0", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ]   [[package]] @@ -4535,7 +4596,7 @@ "iana-time-zone-haiku", "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.1", + "windows-core 0.58.0", ]   [[package]] @@ -5274,7 +5335,7 @@ "ecdsa", "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.9", + "sha2", "signature", ]   @@ -5370,7 +5431,7 @@ "hkdf", "k256", "multihash", "quick-protobuf", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", "zeroize", @@ -5399,52 +5460,6 @@ "redox_syscall", ]   [[package]] -name = "libsecp256k1" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" -dependencies = [ - "arrayref", - "base64 0.22.1", - "digest 0.9.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] name = "libz-sys" version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -5869,6 +5884,23 @@ "unsigned-varint", ]   [[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + +[[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -6202,9 +6234,8 @@ ]   [[package]] name = "op-revm" -version = "10.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9ba4f4693811e73449193c8bd656d3978f265871916882e6a51a487e4f96217" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "revm", @@ -6218,16 +6249,54 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"   [[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"   [[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] name = "opentelemetry" -version = "0.29.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = 
"b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", @@ -6239,25 +6308,23 @@ ]   [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ]   [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -6271,38 +6338,36 @@ ]   [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", "tonic", + "tonic-prost", ]   [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846"   [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", "thiserror 2.0.16", - "tracing", ]   [[package]] @@ -6329,7 +6394,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "sha2 0.10.9", + "sha2", ]   [[package]] @@ -6460,8 +6525,18 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_macros", - "phf_shared", + "phf_macros 0.11.3", + "phf_shared 0.11.3", +] + +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros 0.13.1", + "phf_shared 0.13.1", "serde", ]   @@ -6471,18 +6546,41 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared", + "phf_shared 0.11.3", "rand 0.8.5", ]   [[package]] +name = "phf_generator" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" +dependencies = [ + "fastrand 2.3.0", + "phf_shared 0.13.1", +] + +[[package]] name = "phf_macros" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ - "phf_generator", - "phf_shared", + "phf_generator 0.11.3", + "phf_shared 0.11.3", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "phf_macros" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator 0.13.1", + "phf_shared 0.13.1", "proc-macro2", "quote", "syn 2.0.106", @@ -6493,6 +6591,15 @@ name = "phf_shared" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" dependencies = [ "siphasher", ] @@ -6789,9 +6896,9 @@ ]   [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -6799,9 +6906,9 @@ ]   [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -6903,7 +7010,7 @@ "libc", "once_cell", "socket2 0.6.0", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ]   [[package]] @@ -7691,6 +7798,7 @@ "reth-ethereum-primitives", "reth-optimism-primitives", "reth-primitives-traits", "reth-prune-types", + "reth-scroll-primitives", "reth-stages-types", "reth-storage-errors", "reth-trie-common", @@ -7940,7 +8048,7 @@ "pin-project", "rand 0.8.5", "reth-network-peers", "secp256k1 0.30.0", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 2.0.16", "tokio", @@ -7968,6 +8076,7 @@ "reth-payload-builder", "reth-payload-primitives", "reth-provider", "reth-transaction-pool", + "scroll-alloy-rpc-types-engine", "tokio", "tokio-stream", "tracing", @@ -8158,7 +8267,7 @@ "futures", "futures-util", "reqwest", "reth-fs-util", - "sha2 0.10.9", + "sha2", "tempfile", "test-case", "tokio", @@ -8355,7 +8464,7 @@ "reth-payload-primitives", "reth-primitives-traits", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ]   @@ -8459,6 +8568,7 @@ "reth-storage-api", "reth-storage-errors", "reth-trie-common", "revm", + "scroll-alloy-evm", ]   [[package]] @@ -8623,6 +8733,7 @@ name = "reth-invalid-block-hooks" version = "1.8.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -8630,18 +8741,24 @@ "eyre", "futures", "jsonrpsee", "pretty_assertions", + "reth-chainspec", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", + "reth-testing-utils", "reth-tracing", "reth-trie", + "revm", "revm-bytecode", "revm-database", "serde", "serde_json", + "tempfile", ]   [[package]] @@ -8734,6 +8851,7 @@ "alloy-genesis", "alloy-primitives", "alloy-rlp", "aquamarine", + "async-trait", "auto_impl", 
"codspeed-criterion-compat", "derive_more", @@ -8801,6 +8919,7 @@ "reth-ethereum-forks", "reth-network-p2p", "reth-network-peers", "reth-network-types", + "reth-primitives-traits", "reth-tokio-util", "serde", "thiserror 2.0.16", @@ -9473,7 +9592,7 @@ "reth-storage-api", "reth-transaction-pool", "revm", "serde", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", ] @@ -9662,6 +9781,7 @@ "reth-chain-state", "reth-chainspec", "reth-errors", "reth-primitives-traits", + "scroll-alloy-rpc-types-engine", "serde", "thiserror 2.0.16", "tokio", @@ -9737,6 +9857,7 @@ "reth-codecs", "revm-bytecode", "revm-primitives", "revm-state", + "scroll-alloy-consensus", "secp256k1 0.30.0", "serde", "serde_json", @@ -9769,7 +9890,6 @@ "reth-db-api", "reth-errors", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", - "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", @@ -9973,6 +10093,7 @@ "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-scroll-evm", "reth-storage-api", "reth-tasks", "reth-testing-utils", @@ -9983,7 +10104,7 @@ "revm-inspectors", "revm-primitives", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tokio", "tokio-stream", @@ -10114,8 +10235,13 @@ "reth-ethereum-primitives", "reth-evm", "reth-optimism-primitives", "reth-primitives-traits", + "reth-scroll-primitives", "reth-storage-api", "revm-context", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-rpc-types", "serde_json", "thiserror 2.0.16", ] @@ -10209,6 +10335,7 @@ "reth-revm", "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-scroll-evm", "reth-storage-api", "reth-tasks", "reth-transaction-pool", @@ -10299,6 +10426,316 @@ "strum 0.27.2", ]   [[package]] +name = "reth-scroll-chainspec" +version = "1.8.2" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-serde", + "auto_impl", + "derive_more", + "once_cell", + "reth-chainspec", + "reth-ethereum-forks", + "reth-network-peers", + "reth-primitives-traits", + "reth-scroll-forks", + "reth-trie-common", + "scroll-alloy-hardforks", + "serde", + "serde_json", +] + +[[package]] +name = "reth-scroll-cli" +version = "1.8.2" +dependencies = [ + "clap", + "eyre", + "proptest", + "reth-cli", + "reth-cli-commands", + "reth-cli-runner", + "reth-consensus", + "reth-db", + "reth-node-builder", + "reth-node-core", + "reth-node-metrics", + "reth-scroll-chainspec", + "reth-scroll-consensus", + "reth-scroll-evm", + "reth-scroll-node", + "reth-scroll-primitives", + "reth-tracing", + "scroll-alloy-consensus", + "tracing", +] + +[[package]] +name = "reth-scroll-consensus" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "reth-chainspec", + "reth-consensus", + "reth-consensus-common", + "reth-ethereum-consensus", + "reth-execution-types", + "reth-primitives-traits", + "reth-scroll-chainspec", + "reth-scroll-primitives", + "scroll-alloy-consensus", + "scroll-alloy-hardforks", + "thiserror 2.0.16", + "tracing", +] + +[[package]] +name = "reth-scroll-engine-primitives" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-engine", + "arbitrary", + "eyre", + "rand 0.9.2", + "reth-chain-state", + "reth-chainspec", + "reth-engine-primitives", + "reth-payload-builder", + "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", + "reth-scroll-chainspec", + 
"reth-scroll-primitives", + "scroll-alloy-hardforks", + "scroll-alloy-rpc-types-engine", + "serde", + "sha2", +] + +[[package]] +name = "reth-scroll-evm" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-primitives", + "alloy-rpc-types-engine", + "derive_more", + "eyre", + "reth-chainspec", + "reth-evm", + "reth-execution-types", + "reth-primitives", + "reth-primitives-traits", + "reth-scroll-chainspec", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-api", + "revm", + "revm-primitives", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-hardforks", + "thiserror 2.0.16", + "tracing", +] + +[[package]] +name = "reth-scroll-forks" +version = "1.8.2" +dependencies = [ + "alloy-chains", + "alloy-primitives", + "auto_impl", + "once_cell", + "reth-ethereum-forks", + "scroll-alloy-hardforks", + "serde", +] + +[[package]] +name = "reth-scroll-node" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-genesis", + "alloy-primitives", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "clap", + "eyre", + "reth-chainspec", + "reth-db", + "reth-e2e-test-utils", + "reth-engine-local", + "reth-eth-wire-types", + "reth-evm", + "reth-network", + "reth-node-api", + "reth-node-builder", + "reth-node-core", + "reth-node-types", + "reth-payload-builder", + "reth-primitives", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-scroll-chainspec", + "reth-scroll-consensus", + "reth-scroll-engine-primitives", + "reth-scroll-evm", + "reth-scroll-node", + "reth-scroll-payload", + "reth-scroll-primitives", + "reth-scroll-rpc", + "reth-scroll-txpool", + "reth-tasks", + "reth-tracing", + "reth-transaction-pool", + "reth-trie-db", + "revm", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-hardforks", + "scroll-alloy-network", + "scroll-alloy-rpc-types", + "scroll-alloy-rpc-types-engine", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "reth-scroll-payload" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "alloy-rlp", + "futures-util", + "reth-basic-payload-builder", + "reth-chain-state", + "reth-chainspec", + "reth-evm", + "reth-execution-types", + "reth-payload-builder", + "reth-payload-primitives", + "reth-payload-util", + "reth-primitives-traits", + "reth-revm", + "reth-scroll-chainspec", + "reth-scroll-engine-primitives", + "reth-scroll-evm", + "reth-scroll-primitives", + "reth-storage-api", + "reth-transaction-pool", + "revm", + "scroll-alloy-hardforks", + "thiserror 2.0.16", + "tracing", +] + +[[package]] +name = "reth-scroll-primitives" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bytes", + "modular-bitfield", + "once_cell", + "rand 0.9.2", + "reth-codecs", + "reth-primitives-traits", + "reth-zstd-compressors", + "rstest", + "scroll-alloy-consensus", + "serde", +] + +[[package]] +name = "reth-scroll-rpc" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "eyre", + "jsonrpsee-types", + "reqwest", + "reth-chainspec", + "reth-evm", + "reth-node-api", + "reth-node-builder", + "reth-primitives-traits", + "reth-provider", + "reth-rpc", + "reth-rpc-convert", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-scroll-chainspec", 
+ "reth-scroll-evm", + "reth-scroll-primitives", + "reth-tasks", + "reth-transaction-pool", + "revm", + "scroll-alloy-consensus", + "scroll-alloy-hardforks", + "scroll-alloy-network", + "scroll-alloy-rpc-types", + "thiserror 2.0.16", + "tokio", + "tracing", +] + +[[package]] +name = "reth-scroll-txpool" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "c-kzg", + "derive_more", + "parking_lot", + "reth-chainspec", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-scroll-chainspec", + "reth-scroll-consensus", + "reth-scroll-evm", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-api", + "reth-transaction-pool", + "revm-scroll", + "scroll-alloy-consensus", + "tracing", +] + +[[package]] name = "reth-stages" version = "1.8.2" dependencies = [ @@ -10573,18 +11010,21 @@ version = "1.8.2" dependencies = [ "clap", "eyre", + "reth-tracing-otlp", "rolling-file", "tracing", "tracing-appender", "tracing-journald", "tracing-logfmt", "tracing-subscriber 0.3.20", + "url", ]   [[package]] name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ + "eyre", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -10592,6 +11032,7 @@ "opentelemetry_sdk", "tracing", "tracing-opentelemetry", "tracing-subscriber 0.3.20", + "url", ]   [[package]] @@ -10739,6 +11180,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "codspeed-criterion-compat", + "crossbeam-channel", "dashmap 6.1.0", "derive_more", "itertools 0.14.0", @@ -10833,9 +11275,8 @@ ]   [[package]] name = "revm" -version = "29.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718d90dce5f07e115d0e66450b1b8aa29694c1cf3f89ebddaddccc2ccbd2f13e" +version = "30.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context", @@ -10852,21 +11293,19 @@ ]   [[package]] name = "revm-bytecode" -version = "6.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c52031b73cae95d84cd1b07725808b5fd1500da3e5e24574a3b2dc13d9f16d" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", - "phf", + "phf 0.13.1", "revm-primitives", "serde", ]   [[package]] name = "revm-context" -version = "9.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a20c98e7008591a6f012550c2a00aa36cba8c14cc88eb88dec32eb9102554b4" +version = "10.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", "cfg-if", @@ -10881,9 +11320,8 @@ ]   [[package]] name = "revm-context-interface" -version = "10.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50d241ed1ce647b94caf174fcd0239b7651318b2c4c06b825b59b973dfb8495" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10897,9 +11335,8 @@ ]   [[package]] name = "revm-database" -version = "7.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a276ed142b4718dcf64bc9624f474373ed82ef20611025045c3fb23edbef9c" +version = "9.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-eips", "revm-bytecode", 
@@ -10911,9 +11348,8 @@ ]   [[package]] name = "revm-database-interface" -version = "7.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c523c77e74eeedbac5d6f7c092e3851dbe9c7fec6f418b85992bd79229db361" +version = "8.0.2" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -10924,9 +11360,8 @@ ]   [[package]] name = "revm-handler" -version = "10.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550331ea85c1d257686e672081576172fe3d5a10526248b663bbf54f1bef226a" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "derive-where", @@ -10943,9 +11378,8 @@ ]   [[package]] name = "revm-inspector" -version = "10.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0a6e9ccc2ae006f5bed8bd80cd6f8d3832cd55c5e861b9402fdd556098512f" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -10961,9 +11395,9 @@ ]   [[package]] name = "revm-inspectors" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9b329afcc0f9fd5adfa2c6349a7435a8558e82bcae203142103a9a95e2a63b6" +checksum = "0ce1228a7989cc3d9af84c0de2abe39680a252c265877e67d2f0fb4f392cb690" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10981,21 +11415,20 @@ ]   [[package]] name = "revm-interpreter" -version = "25.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575dc51b1d8f5091daa12a435733a90b4a132dca7ccee0666c7db3851bc30c" +version = "27.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context-interface", "revm-primitives", + "revm-state", "serde", ]   [[package]] name = "revm-precompile" -version = "27.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b57d4bd9e6b5fe469da5452a8a137bc2d030a3cd47c46908efc615bbc699da" +version = "28.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11008,20 +11441,18 @@ "blst", "c-kzg", "cfg-if", "k256", - "libsecp256k1", "p256", "revm-primitives", "ripemd", "rug", "secp256k1 0.31.1", - "sha2 0.10.9", + "sha2", ]   [[package]] name = "revm-primitives" -version = "20.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa29d9da06fe03b249b6419b33968ecdf92ad6428e2f012dc57bcd619b5d94e" +version = "21.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-primitives", "num_enum", @@ -11030,10 +11461,23 @@ "serde", ]   [[package]] +name = "revm-scroll" +version = "0.1.0" +source = "git+https://github.com/scroll-tech/scroll-revm?tag=scroll-v91#a1ac004adf0019d9926defc4e31e6a76a7e558f7" +dependencies = [ + "auto_impl", + "enumn", + "once_cell", + "revm", + "revm-inspector", + "revm-primitives", + "serde", +] + +[[package]] name = "revm-state" -version = "7.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f64fbacb86008394aaebd3454f9643b7d5a782bd251135e17c5b33da592d84d" +version = "8.0.1" +source = 
"git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitflags 2.9.4", "revm-bytecode", @@ -11291,7 +11735,7 @@ "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.1", + "windows-sys 0.59.0", ]   [[package]] @@ -11318,7 +11762,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.5.0", ]   [[package]] @@ -11337,7 +11781,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" dependencies = [ - "core-foundation", + "core-foundation 0.10.1", "core-foundation-sys", "jni", "log", @@ -11346,7 +11790,7 @@ "rustls", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 3.5.0", "security-framework-sys", "webpki-root-certs 0.26.11", "windows-sys 0.59.0", @@ -11465,6 +11909,155 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"   [[package]] +name = "scroll-alloy-consensus" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "arbitrary", + "bincode 1.3.3", + "derive_more", + "modular-bitfield", + "proptest", + "proptest-arbitrary-interop", + "rand 0.9.2", + "reth-codecs", + "reth-codecs-derive", + "serde", + "serde_json", + "serde_with", +] + +[[package]] +name = "scroll-alloy-evm" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-hardforks", + "alloy-primitives", + "auto_impl", + "encoder-standard", + "eyre", + "reth-evm", + "reth-scroll-chainspec", + "reth-scroll-evm", + "revm", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-hardforks", + "serde", +] + +[[package]] +name = "scroll-alloy-hardforks" +version = "1.8.2" +dependencies = [ + "alloy-hardforks", + "auto_impl", + "serde", +] + +[[package]] +name = "scroll-alloy-network" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-signer", + "scroll-alloy-consensus", + "scroll-alloy-rpc-types", +] + +[[package]] +name = "scroll-alloy-provider" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-engine", + "alloy-transport", + "alloy-transport-http", + "async-trait", + "auto_impl", + "derive_more", + "eyre", + "futures-util", + "http-body-util", + "jsonrpsee", + "reqwest", + "reth-engine-primitives", + "reth-payload-builder", + "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", + "reth-provider", + "reth-rpc-api", + "reth-rpc-builder", + "reth-rpc-engine-api", + "reth-scroll-chainspec", + "reth-scroll-engine-primitives", + "reth-scroll-node", + "reth-scroll-payload", + "reth-tasks", + "reth-tracing", + "reth-transaction-pool", + "scroll-alloy-network", + "scroll-alloy-rpc-types-engine", + "thiserror 2.0.16", + "tokio", + "tower", +] + +[[package]] +name = "scroll-alloy-rpc-types" +version = "1.8.2" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "arbitrary", + "derive_more", + "scroll-alloy-consensus", + "serde", + "serde_json", + "similar-asserts", +] + +[[package]] +name = 
"scroll-alloy-rpc-types-engine" +version = "1.8.2" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-engine", + "arbitrary", + "serde", + "serde_json", +] + +[[package]] +name = "scroll-reth" +version = "1.8.2" +dependencies = [ + "clap", + "reth-cli-util", + "reth-scroll-cli", + "reth-scroll-node", + "tracing", +] + +[[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -11522,12 +12115,25 @@ ]   [[package]] name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc198e42d9b7510827939c9a15f5062a0c913f3371d765977e586d2fe6c16f4a" dependencies = [ "bitflags 2.9.4", - "core-foundation", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -11719,19 +12325,6 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", ]   [[package]] @@ -12073,9 +12666,9 @@ ]   [[package]] name = "syn-solidity" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2375c17f6067adc651d8c2c51658019cef32edfff4a982adaf1d7fd1c039f08b" +checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" dependencies = [ "paste", "proc-macro2", @@ -12117,6 +12710,27 @@ "windows 0.57.0", ]   [[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] name = "tagptr" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -12160,7 +12774,7 @@ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.1", + "windows-sys 0.59.0", ]   [[package]] @@ -12463,6 +13077,16 @@ "syn 2.0.106", ]   [[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] name = "tokio-rustls" version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -12589,9 +13213,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"   [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = 
"eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "base64 0.22.1", @@ -12601,11 +13225,22 @@ "http-body", "http-body-util", "percent-encoding", "pin-project", - "prost", + "sync_wrapper", "tokio-stream", "tower-layer", "tower-service", "tracing", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", ]   [[package]] @@ -12762,15 +13397,16 @@ ]   [[package]] name = "tracing-opentelemetry" -version = "0.30.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" dependencies = [ "js-sys", - "once_cell", "opentelemetry", "opentelemetry_sdk", + "rustversion", "smallvec", + "thiserror 2.0.16", "tracing", "tracing-core", "tracing-log", @@ -12837,7 +13473,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" dependencies = [ "cc", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ]   [[package]] @@ -13393,7 +14029,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.48.0", ]   [[package]] @@ -13480,19 +14116,6 @@ "windows-interface 0.59.2", "windows-link 0.1.3", "windows-result 0.3.4", "windows-strings 0.4.2", -] - -[[package]] -name = "windows-core" -version = "0.62.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" -dependencies = [ - "windows-implement 0.60.1", - "windows-interface 0.59.2", - "windows-link 0.2.0", - "windows-result 0.4.0", - "windows-strings 0.5.0", ]   [[package]] @@ -13595,6 +14218,17 @@ "windows-link 0.1.3", ]   [[package]] +name = "windows-registry" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +dependencies = [ + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] name = "windows-result" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -13622,15 +14256,6 @@ "windows-link 0.1.3", ]   [[package]] -name = "windows-result" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" -dependencies = [ - "windows-link 0.2.0", -] - -[[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -13647,15 +14272,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link 0.1.3", -] - -[[package]] -name = "windows-strings" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" -dependencies = [ - "windows-link 0.2.0", ]   [[package]]
diff --git reth/Cargo.toml scroll-reth/Cargo.toml index 888ff2ad9d27bf852965c2970b345b804896fcaa..fb12f7372c5d6f1fa9f970782dbf710396d2736f 100644 --- reth/Cargo.toml +++ scroll-reth/Cargo.toml @@ -110,6 +110,25 @@ "crates/rpc/rpc-testing-util/", "crates/rpc/rpc-e2e-tests/", "crates/rpc/rpc-convert/", "crates/rpc/rpc/", + "crates/scroll/alloy/consensus", + "crates/scroll/alloy/evm", + "crates/scroll/alloy/hardforks", + "crates/scroll/alloy/network", + "crates/scroll/alloy/provider", + "crates/scroll/alloy/rpc-types", + "crates/scroll/alloy/rpc-types-engine", + "crates/scroll/bin/scroll-reth", + "crates/scroll/chainspec", + "crates/scroll/cli", + "crates/scroll/consensus", + "crates/scroll/engine-primitives", + "crates/scroll/evm", + "crates/scroll/hardforks", + "crates/scroll/node", + "crates/scroll/payload", + "crates/scroll/primitives", + "crates/scroll/txpool", + "crates/scroll/rpc", "crates/stages/api/", "crates/stages/stages/", "crates/stages/types/", @@ -454,9 +473,10 @@ reth-storage-errors = { path = "crates/storage/errors", default-features = false } reth-tasks = { path = "crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } -reth-tracing = { path = "crates/tracing" } +reth-tracing = { path = "crates/tracing", default-features = false } +reth-tracing-otlp = { path = "crates/tracing-otlp" } reth-transaction-pool = { path = "crates/transaction-pool" } -reth-trie = { path = "crates/trie/trie" } +reth-trie = { path = "crates/trie/trie", default-features = false } reth-trie-common = { path = "crates/trie/common", default-features = false } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } @@ -467,31 +487,32 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" }   # revm -revm = { version = "29.0.1", default-features = false } -revm-bytecode = { version = "6.2.2", default-features = false } -revm-database = { version = "7.0.5", default-features = false } -revm-state = { version = "7.0.5", default-features = false } -revm-primitives = { version = "20.2.1", default-features = false } -revm-interpreter = { version = "25.0.3", default-features = false } -revm-inspector = { version = "10.0.1", default-features = false } -revm-context = { version = "9.1.0", default-features = false } -revm-context-interface = { version = "10.2.0", default-features = false } -revm-database-interface = { version = "7.0.5", default-features = false } -op-revm = { version = "10.1.0", default-features = false } -revm-inspectors = "0.30.0" +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = 
false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", tag = "scroll-v91", default-features = false } +revm-inspectors = "0.31.0"   # eth alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.3.1" +alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.21.2", default-features = false } -alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] } +alloy-evm = { version = "0.22.0", default-features = false } +alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.3.1" -alloy-sol-types = { version = "1.3.1", default-features = false } +alloy-sol-macro = "1.4.1" +alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false }   -alloy-hardforks = "0.3.5" +alloy-hardforks = "0.4.0"   alloy-consensus = { version = "1.0.37", default-features = false } alloy-contract = { version = "1.0.37", default-features = false } @@ -521,9 +542,30 @@ alloy-transport-http = { version = "1.0.37", features = ["reqwest-rustls-tls"], default-features = false } alloy-transport-ipc = { version = "1.0.37", default-features = false } alloy-transport-ws = { version = "1.0.37", default-features = false }   +# scroll +scroll-alloy-consensus = { path = "crates/scroll/alloy/consensus", default-features = false } +scroll-alloy-evm = { path = "crates/scroll/alloy/evm", default-features = false } +scroll-alloy-hardforks = { path = "crates/scroll/alloy/hardforks", default-features = false } +scroll-alloy-network = { path = "crates/scroll/alloy/network", default-features = false } +scroll-alloy-rpc-types = { path = "crates/scroll/alloy/rpc-types", default-features = false } +scroll-alloy-rpc-types-engine = { path = "crates/scroll/alloy/rpc-types-engine", default-features = false } +scroll-alloy-provider = { path = "crates/scroll/alloy/provider" } +reth-scroll-chainspec = { path = "crates/scroll/chainspec", default-features = false } +reth-scroll-cli = { path = "crates/scroll/cli" } +reth-scroll-consensus = { path = "crates/scroll/consensus" } +reth-scroll-evm = { path = "crates/scroll/evm", default-features = false } +reth-scroll-engine-primitives = { path = "crates/scroll/engine-primitives" } +reth-scroll-forks = { path = "crates/scroll/hardforks", default-features = false } +reth-scroll-node = { path = "crates/scroll/node" } +reth-scroll-payload = { path = "crates/scroll/payload" } +reth-scroll-primitives = { path = "crates/scroll/primitives", default-features = false } +reth-scroll-rpc = { path = "crates/scroll/rpc" } +reth-scroll-trie = { path = "crates/scroll/trie" } +reth-scroll-txpool = { path = "crates/scroll/txpool" } + # op -alloy-op-evm = { version = "0.21.2", default-features = false } -alloy-op-hardforks = "0.3.5" +alloy-op-evm = { version = "0.22.0", default-features = false } +alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.20.0", default-features = false } op-alloy-rpc-types-engine = { version = 
"0.20.0", default-features = false } op-alloy-network = { version = "0.20.0", default-features = false } @@ -534,7 +576,7 @@ # misc either = { version = "1.15.0", default-features = false } aquamarine = "0.6" -auto_impl = "1" +auto_impl = { version = "1", default-features = false } backon = { version = "1.2", default-features = false, features = ["std-blocking-sleep", "tokio-sleep"] } bincode = "1.3" bitflags = "2.4" @@ -585,6 +627,7 @@ mini-moka = "0.10" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } chrono = "0.4.41" +encoder-standard = { git = "https://github.com/scroll-tech/da-codec", default-features = false }   # metrics metrics = "0.24.0" @@ -649,6 +692,13 @@ # config toml = "0.8"   +# otlp obs +opentelemetry_sdk = "0.31" +opentelemetry = "0.31" +opentelemetry-otlp = "0.31" +opentelemetry-semantic-conventions = "0.31" +tracing-opentelemetry = "0.32" + # misc-testing arbitrary = "1.3" assert_matches = "1.5.0" @@ -720,40 +770,42 @@ visibility = "0.1.1" walkdir = "2.3.3" vergen-git2 = "1.0.5"   -# [patch.crates-io] -# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-serde = { git = 
"https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } -# alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } - -# op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } -# op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } -# op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } -# op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } -# op-alloy-rpc-jsonrpsee = { git = "https://github.com/alloy-rs/op-alloy", rev = "a79d6fc" } +[patch.crates-io] +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" } +op-revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" } +# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-transport-http = { git = 
"https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } +# +# op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "ad607c1" } +# op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "ad607c1" } +# op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "ad607c1" } +# op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "ad607c1" } +# op-alloy-rpc-jsonrpsee = { git = "https://github.com/alloy-rs/op-alloy", rev = "ad607c1" } # # revm-inspectors = { git = "https://github.com/paradigmxyz/revm-inspectors", rev = "1207e33" } #
diff --git reth/Cross.toml scroll-reth/Cross.toml index 9b4fd44f75248c31f2591afc76289148786111ef..eff8da32fdef48acc4e92fd81e4c78460f0285b4 100644 --- reth/Cross.toml +++ scroll-reth/Cross.toml @@ -34,5 +34,14 @@ env.passthrough = [ "CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER=riscv64-linux-gnu-gcc", ]   +[target.x86_64-pc-windows-gnu] +# Why do we need a custom Dockerfile on Windows: +# 1. `reth-libmdbx` stopped working with MinGW 9.3 that cross image comes with. +# 2. To be able to update the version of MinGW, we need to also update the Ubuntu that the image is based on. +# +# Also see https://github.com/cross-rs/cross/issues/1667 +# Inspired by https://github.com/cross-rs/cross/blob/9e2298e17170655342d3248a9c8ac37ef92ba38f/docker/Dockerfile.x86_64-pc-windows-gnu#L51 +dockerfile = "./Dockerfile.x86_64-pc-windows-gnu" + [build.env] passthrough = ["JEMALLOC_SYS_WITH_LG_PAGE"]
diff --git reth/Makefile scroll-reth/Makefile index 8d8b0a5b3a5fffbc50414d74cde89b4c67d86aa1..b039610ee6a1e0924f8333eca425d34ce9b6ef75 100644 --- reth/Makefile +++ scroll-reth/Makefile @@ -60,6 +60,12 @@ --features "$(FEATURES)" \ --profile "$(PROFILE)" \ $(CARGO_INSTALL_EXTRA_FLAGS)   +.PHONY: install-scroll +install-scroll: ## Build and install the scroll-reth binary under `~/.cargo/bin`. + cargo install --path crates/scroll/bin/scroll-reth --bin scroll-reth --force --locked \ + --profile "$(PROFILE)" \ + $(CARGO_INSTALL_EXTRA_FLAGS) + .PHONY: build build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" @@ -435,6 +441,37 @@ --tests \ --benches \ --locked \ --all-features + +lint-scroll-reth: + cargo +nightly clippy \ + --workspace \ + --bin "scroll-reth" \ + --lib \ + --examples \ + --tests \ + --benches \ + --features "$(BIN_OTHER_FEATURES)" \ + -- -D warnings + +lint-all: + cargo +nightly clippy \ + --workspace \ + --lib \ + --examples \ + --tests \ + --benches \ + --all-features \ + --locked + +lint-udeps: + cargo +nightly udeps --workspace --lib --examples --tests --benches --all-features --locked \ + --exclude reth-optimism-cli --exclude reth-optimism-consensus --exclude reth-optimism-payload-builder \ + --exclude reth-optimism-node --exclude reth-optimism-evm --exclude reth-optimism-node --exclude reth-optimism-rpc \ + --exclude op-reth --exclude "example-*" --exclude reth --exclude reth-payload-primitives \ + --exclude reth-e2e-test-utils --exclude reth-ethereum-payload-builder --exclude reth-exex-test-utils \ + --exclude reth-node-ethereum --exclude reth-scroll-cli --exclude reth-scroll-evm \ + --exclude reth-scroll-node --exclude "scroll-reth" --exclude reth-scroll-rpc \ + --exclude reth-scroll-trie   lint-typos: ensure-typos typos
diff --git reth/claude/README.md scroll-reth/claude/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7830d53971f0d416406c7195ddf1db5643a03cf2 --- /dev/null +++ scroll-reth/claude/README.md @@ -0,0 +1,48 @@ +# Claude Instructions and Tools + +This directory contains instructions and automation tools for AI-assisted development workflows in the Reth repository. + +## Contents + +### 📄 Instructions + +- **`grafana-dashboard-sync-instructions.md`** - Complete guide for synchronizing Grafana dashboards from upstream to Scroll's Kubernetes-customized versions + +### 🛠️ Tools + +Located in `tools/` directory: + +- **`sync_dashboard.py`** - Automated dashboard synchronization script +- **`compare_dashboards.py`** - Dashboard comparison and analysis tool +- **`detailed_dashboard_analysis.py`** - Detailed query-level analysis tool + +## Usage + +### Grafana Dashboard Synchronization + +To sync Grafana dashboards with upstream: + +```bash +# From repository root +python3 claude/tools/compare_dashboards.py > comparison.txt +python3 claude/tools/sync_dashboard.py +``` + +See `grafana-dashboard-sync-instructions.md` for complete documentation. + +## Purpose + +This directory serves as a knowledge base for repeatable AI-assisted workflows. When working with AI assistants (like Claude), these instructions help ensure consistent and correct execution of complex tasks. + +## Contributing + +When adding new workflows: + +1. Create a detailed instruction file (`.md`) +2. Add any supporting scripts to `tools/` +3. Update this README with a brief description +4. Include usage examples and prerequisites + +--- + +**Last updated:** 2025-12-01
diff --git reth/claude/grafana-dashboard-sync-instructions.md scroll-reth/claude/grafana-dashboard-sync-instructions.md new file mode 100644 index 0000000000000000000000000000000000000000..c0e3f94819909407f20152854bf1127ba155653d --- /dev/null +++ scroll-reth/claude/grafana-dashboard-sync-instructions.md @@ -0,0 +1,431 @@ +# Grafana Dashboard Synchronization Instructions + +## Overview + +This document provides instructions for synchronizing Scroll's Kubernetes-customized Grafana dashboards with upstream reth dashboards. This process should be performed periodically when upstream updates are released. + +## Directory Structure + +- **Upstream dashboards:** `etc/grafana/dashboards/` (canonical source) +- **Scroll K8s dashboards:** `etc/grafana/scroll/` (customized versions) + +## When to Sync + +Sync dashboards when: +- Upstream reth releases new dashboard versions +- New monitoring features are added upstream +- Structural changes are made to upstream dashboards +- Every 1-3 months as part of regular maintenance + +## Synchronization Process + +### Step 1: Analyze Differences + +First, understand what has changed upstream: + +```bash +# Run the comparison script to identify changes +python3 claude/tools/compare_dashboards.py > comparison_report.txt + +# Review the report +cat comparison_report.txt +``` + +The comparison will show: +- **Case A:** Dashboards that exist in both directories (need sync) +- **Case B:** New dashboards in upstream only (evaluate for porting) +- **Case C:** Dashboards only in scroll (evaluate for retention) + +### Step 2: Execute Synchronization + +Run the automated sync script: + +```bash +# Execute the sync script +python3 claude/tools/sync_dashboard.py +``` + +This script will: +- Use upstream dashboards as the base structure +- Add K8s variables (env, service) to all dashboards - NO pod variable +- Preserve dashboard-specific variables (e.g., interval) alongside K8s variables +- Transform all PromQL queries to use service-only label selectors +- Hardcode datasource UID `o59qe-zVz` in all panels +- Preserve Scroll UIDs +- Save updated dashboards to `etc/grafana/scroll/` + +### Step 3: Validate Changes + +Verify the synchronization was successful: + +```bash +# Validate JSON syntax +python3 -c " +import json +from pathlib import Path + +for f in Path('etc/grafana/scroll').glob('*.json'): + with open(f) as fp: + data = json.load(fp) + print(f'✓ {f.name} - Valid ({len(data.get(\"panels\", []))} panels)') +" + +# Review changes +git diff etc/grafana/scroll/ +``` + +### Step 4: Manual Review + +Check for: +- **New panels:** Review new upstream panels and verify they work with K8s labels +- **Removed panels:** Check if any scroll-specific panels were lost +- **Query correctness:** Spot-check that K8s label transformations are correct +- **Variable definitions:** Ensure K8s variables are present in all dashboards + +### Step 5: Test in Grafana + +Before committing: + +1. **Deploy to staging/test Grafana instance** + ```bash + # Update ConfigMap (adjust for your deployment) + kubectl create configmap grafana-dashboards \ + --from-file=etc/grafana/scroll/ \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl rollout restart deployment/grafana -n monitoring + ``` + +2. **Test each dashboard:** + - [ ] Variables populate correctly (env, service) - only 2 variables + - [ ] All panels display data + - [ ] No query errors + - [ ] New panels work as expected + - [ ] Time ranges and refresh work + +3. 
**Performance check:** + - [ ] Dashboard load time < 5 seconds + - [ ] No Prometheus query timeouts + - [ ] Query execution time acceptable + +### Step 6: Commit Changes + +If all tests pass: + +```bash +# Stage changes +git add etc/grafana/scroll/ + +# Commit with descriptive message +git commit -m "feat: sync Grafana dashboards with upstream + +- Update all dashboards with latest upstream structure +- Add [N] new panels from upstream +- Transform queries to use K8s label selectors +- Preserve Scroll UIDs and K8s customizations + +New panels: +- [List notable new panels] + +Testing: Verified all queries and variables work in [environment]" + +# Push changes +git push +``` + +## Kubernetes Customization Pattern + +### Standard K8s Variables + +All Scroll dashboards must include these variables (2 only - NO pod variable): + +```json +{ + "name": "env", + "type": "query", + "definition": "label_values(env)", + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "regex": "(sepolia|mainnet)-eks.*" +} + +{ + "name": "service", + "type": "query", + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "regex": "(l[1|2]reth.*)" +} +``` + +**Important:** +- No `pod` variable - queries aggregate by service only, enabling data continuity when pods are replaced +- No `datasource` variable - datasource UID is hardcoded in all panels +- **Dashboard-specific variables are preserved:** Some dashboards have additional variables (e.g., `interval` in reth-state-growth.json) that must be preserved alongside the K8s variables + +### Hardcoded Datasource + +All panels and targets use a hardcoded Prometheus datasource UID: + +```json +{ + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + } +} +``` + +This matches the Scroll deployment's Prometheus datasource configuration. + +### Query Transformation Rules + +The sync script applies these transformations: + +| Upstream Pattern | Scroll Pattern (K8s) | +|------------------|----------------------| +| `$instance_label="$instance"` | `service="$service", namespace="$env"` | +| `instance="$instance"` | `service="$service", namespace="$env"` | +| `instance=~"$instance"` | `service="$service", namespace="$env"` | + +**Important:** +- Uses exact match (`=`) not regex match (`=~`) for precise service filtering +- Includes `namespace="$env"` to prevent cross-environment data aggregation + +**Example:** +```promql +# Upstream: +reth_database_operation_duration{$instance_label="$instance", quantile="0.99"} + +# Scroll (after transformation): +reth_database_operation_duration{service="$service", namespace="$env", quantile="0.99"} +``` + +### Data Continuity Feature + +By using **service and namespace** filtering (no pod label), dashboards maintain historical data when pods are replaced: +- Old pod dies → new pod starts with different name +- Both pods share the same `service` and `namespace` labels +- Queries aggregate across all pods for that service in that environment +- Historical data remains visible seamlessly +- **Exact match** ensures `service-0` only shows `service-0` data, not `service-1` +- **Namespace filter** prevents cross-environment data mixing (mainnet vs sepolia) + +## Handling Special Cases + +### New Upstream Dashboards (Case B) + +When upstream adds a new dashboard: + +1. 
**Evaluate relevance:** + - Is it applicable to Scroll's deployment? + - Does it use metrics available in Scroll's reth build? + - Is it worth maintaining? + +2. **If relevant, port it:** + - Copy upstream dashboard as base + - Run sync script or manually add K8s variables + - Transform all queries + - Test thoroughly + - Add to `etc/grafana/scroll/` + +3. **If not relevant:** + - Document decision in git commit + - Skip porting + +### Scroll-Only Dashboards (Case C) + +If Scroll has custom dashboards not in upstream: + +1. **Check if metrics are now in upstream:** + - Search upstream dashboards for the same metrics + - If covered, consider removing custom dashboard + +2. **If still unique:** + - Keep the custom dashboard + - Ensure it follows K8s variable pattern + - Document its purpose + +### Major Upstream Changes + +If upstream significantly refactors dashboard structure: + +1. **Backup current scroll versions:** + ```bash + cp -r etc/grafana/scroll etc/grafana/scroll.backup.$(date +%Y%m%d) + ``` + +2. **Run sync with caution:** + - Review changes carefully + - Test extensively before committing + - Consider phased rollout (one dashboard at a time) + +3. **Document breaking changes:** + - Note in commit message + - Update team documentation + - Inform monitoring team + +## Troubleshooting + +### Issue: Sync script fails with errors + +**Solution:** +- Check that upstream dashboards are valid JSON +- Verify Python 3 is available +- Review error message and fix specific issue +- May need to update sync script for new patterns + +### Issue: Variables don't populate after sync + +**Solution:** +- Verify Prometheus has required labels: `env`, `pod`, `service`, `namespace` +- Check variable query syntax in dashboard JSON +- Test queries directly in Prometheus UI +- Ensure label names match your Helm deployment + +### Issue: Queries return no data after transformation + +**Solution:** +- Check that K8s label selectors match your deployment +- Verify metric label structure in Prometheus +- Test transformed query directly in Prometheus +- May need to adjust label names in transformation + +### Issue: New panels from upstream don't work + +**Solution:** +- Verify metrics exist in your reth build version +- Check if feature flags are needed +- Some metrics may be version-specific or feature-specific +- Consider removing panels for unavailable features + +## Maintenance Scripts + +All scripts are located in `claude/tools/` directory. + +### compare_dashboards.py + +Compares upstream and scroll dashboards, showing: +- Panel count differences +- Variable differences +- Structural changes +- K8s customization patterns + +**Usage:** +```bash +python3 claude/tools/compare_dashboards.py > report.txt +``` + +### sync_dashboard.py + +Automated synchronization script that: +- Loads upstream as base +- Adds K8s variables +- Transforms all queries +- Preserves Scroll UIDs +- Saves to scroll directory + +**Usage:** +```bash +python3 claude/tools/sync_dashboard.py +``` + +**Customization:** +Edit the `dashboards` list in `main()` to add/remove dashboards to sync. 
+ +### detailed_dashboard_analysis.py + +Generates detailed analysis including: +- Query-by-query comparison +- Specific transformation plans +- Migration guidance + +**Usage:** +```bash +python3 claude/tools/detailed_dashboard_analysis.py > analysis.txt +``` + +## Quick Reference Commands + +```bash +# Full sync workflow +python3 claude/tools/compare_dashboards.py > comparison.txt +cat comparison.txt +python3 claude/tools/sync_dashboard.py +git diff etc/grafana/scroll/ +# Review, test, commit + +# Validate JSON +python3 -m json.tool etc/grafana/scroll/*.json > /dev/null && echo "All valid" + +# Check file sizes +ls -lh etc/grafana/scroll/ + +# Count panels per dashboard +for f in etc/grafana/scroll/*.json; do + echo "$(basename $f): $(jq '.panels | length' $f) panels" +done + +# Extract all metrics used +for f in etc/grafana/scroll/*.json; do + jq -r '.. | .expr? // empty' $f | grep -oE 'reth_\w+' | sort -u +done +``` + +## Best Practices + +1. **Always backup before major syncs:** + ```bash + cp -r etc/grafana/scroll etc/grafana/scroll.backup.$(date +%Y%m%d) + ``` + +2. **Test in non-production first:** + - Deploy to staging/dev Grafana + - Verify all functionality + - Get team review + +3. **Sync regularly:** + - Monthly check for upstream changes + - Don't let versions drift too far + +4. **Document custom changes:** + - If you manually modify dashboards, document why + - Consider contributing improvements upstream + +5. **Keep scripts updated:** + - Update transformation patterns as needed + - Add new label patterns if deployment changes + +6. **Version control everything:** + - Commit dashboards to git + - Use descriptive commit messages + - Tag releases if using versioned deployments + +## Resources + +- **Upstream reth repository:** https://github.com/paradigmxyz/reth +- **Grafana documentation:** https://grafana.com/docs/ +- **PromQL documentation:** https://prometheus.io/docs/prometheus/latest/querying/basics/ + +## Contact + +For questions about this process: +- Review git history for previous sync commits +- Check `GRAFANA_DASHBOARD_SYNC_PLAN.md` for detailed analysis +- Consult with the monitoring/observability team + +--- + +**Last updated:** 2025-12-01 +**Last sync:** 2025-12-01 (Converged with upstream, service-only pattern for data continuity) +**Pattern:** +- 2 variables only: `env`, `service` (NO pod variable) +- Hardcoded datasource UID: `o59qe-zVz` +- Enables seamless pod replacement with data continuity
diff --git reth/claude/tools/compare_dashboards.py scroll-reth/claude/tools/compare_dashboards.py new file mode 100644 index 0000000000000000000000000000000000000000..87b80b4734930b16eac70f1c6d06e9551d7d3b6b --- /dev/null +++ scroll-reth/claude/tools/compare_dashboards.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +""" +Grafana Dashboard Comparison Tool +Compares upstream and scroll-customized dashboards +""" + +import json +import sys +from pathlib import Path +from typing import Dict, List, Set, Any +from collections import defaultdict + +def load_json(filepath: Path) -> Dict: + """Load JSON dashboard file""" + with open(filepath, 'r') as f: + return json.load(f) + +def extract_panel_info(panel: Dict) -> Dict: + """Extract key information from a panel""" + return { + 'id': panel.get('id'), + 'title': panel.get('title', 'N/A'), + 'type': panel.get('type'), + 'datasource': panel.get('datasource'), + 'gridPos': panel.get('gridPos'), + 'targets_count': len(panel.get('targets', [])), + 'targets': [ + { + 'expr': t.get('expr', 'N/A')[:100], # Truncate long queries + 'legendFormat': t.get('legendFormat'), + } + for t in panel.get('targets', []) + ] + } + +def extract_template_vars(dashboard: Dict) -> List[Dict]: + """Extract template variables""" + templating = dashboard.get('templating', {}) + variables = templating.get('list', []) + return [ + { + 'name': v.get('name'), + 'type': v.get('type'), + 'query': str(v.get('query', ''))[:100], + 'datasource': v.get('datasource'), + } + for v in variables + ] + +def extract_dashboard_structure(filepath: Path) -> Dict: + """Extract key structural elements from dashboard""" + data = load_json(filepath) + + panels = [] + for panel in data.get('panels', []): + if panel.get('type') == 'row': + # Row with nested panels + panels.append({ + 'type': 'row', + 'title': panel.get('title'), + 'collapsed': panel.get('collapsed', False), + }) + for subpanel in panel.get('panels', []): + panels.append(extract_panel_info(subpanel)) + else: + panels.append(extract_panel_info(panel)) + + return { + 'title': data.get('title'), + 'uid': data.get('uid'), + 'version': data.get('version'), + 'panels_count': len(panels), + 'panels': panels, + 'variables': extract_template_vars(data), + 'variables_count': len(extract_template_vars(data)), + 'refresh': data.get('refresh'), + 'time': data.get('time'), + } + +def find_kubernetes_patterns(filepath: Path) -> Dict[str, List[str]]: + """Find Kubernetes-specific patterns in the dashboard""" + with open(filepath, 'r') as f: + content = f.read() + + patterns = { + 'namespace_selectors': [], + 'pod_selectors': [], + 'job_selectors': [], + 'release_selectors': [], + 'k8s_labels': [], + } + + # Search for common Kubernetes label patterns + import re + + # Find namespace patterns + namespace_matches = re.findall(r'namespace=~?"([^"]+)"', content) + patterns['namespace_selectors'] = list(set(namespace_matches)) + + # Find pod patterns + pod_matches = re.findall(r'pod=~?"([^"]+)"', content) + patterns['pod_selectors'] = list(set(pod_matches)) + + # Find job patterns + job_matches = re.findall(r'job=~?"([^"]+)"', content) + patterns['job_selectors'] = list(set(job_matches)) + + # Find release patterns + release_matches = re.findall(r'release=~?"([^"]+)"', content) + patterns['release_selectors'] = list(set(release_matches)) + + # Find variable references + var_matches = re.findall(r'\$(\w+)', content) + patterns['k8s_labels'] = [v for v in set(var_matches) if v in ['namespace', 'pod', 'job', 'release', 'instance']] + + return patterns + +def 
compare_panels(upstream_panels: List[Dict], scroll_panels: List[Dict]) -> Dict: + """Compare panel structures between upstream and scroll""" + upstream_titles = {p.get('title'): p for p in upstream_panels if p.get('title')} + scroll_titles = {p.get('title'): p for p in scroll_panels if p.get('title')} + + common_titles = set(upstream_titles.keys()) & set(scroll_titles.keys()) + only_upstream = set(upstream_titles.keys()) - set(scroll_titles.keys()) + only_scroll = set(scroll_titles.keys()) - set(upstream_titles.keys()) + + query_diffs = [] + for title in common_titles: + up = upstream_titles[title] + sc = scroll_titles[title] + + if up.get('targets') != sc.get('targets'): + query_diffs.append({ + 'title': title, + 'upstream_targets': up.get('targets'), + 'scroll_targets': sc.get('targets'), + }) + + return { + 'common_panels': len(common_titles), + 'only_upstream': sorted(list(only_upstream)), + 'only_scroll': sorted(list(only_scroll)), + 'query_differences': query_diffs, + } + +def compare_variables(upstream_vars: List[Dict], scroll_vars: List[Dict]) -> Dict: + """Compare template variables""" + upstream_names = {v['name']: v for v in upstream_vars} + scroll_names = {v['name']: v for v in scroll_vars} + + common = set(upstream_names.keys()) & set(scroll_names.keys()) + only_upstream = set(upstream_names.keys()) - set(scroll_names.keys()) + only_scroll = set(scroll_names.keys()) - set(upstream_names.keys()) + + differences = [] + for name in common: + if upstream_names[name] != scroll_names[name]: + differences.append({ + 'name': name, + 'upstream': upstream_names[name], + 'scroll': scroll_names[name], + }) + + return { + 'common_variables': len(common), + 'only_upstream': sorted(list(only_upstream)), + 'only_scroll': sorted(list(only_scroll)), + 'modified': differences, + } + +def main(): + upstream_dir = Path('etc/grafana/dashboards') + scroll_dir = Path('etc/grafana/scroll') + + # Find all dashboards + upstream_files = {f.name: f for f in upstream_dir.glob('*.json')} + scroll_files = {f.name: f for f in scroll_dir.glob('*.json')} + + print("=" * 80) + print("GRAFANA DASHBOARD COMPARISON REPORT") + print("=" * 80) + print() + + # Case A: Dashboards in both directories + common_files = set(upstream_files.keys()) & set(scroll_files.keys()) + print(f"CASE A: Dashboards in both directories ({len(common_files)})") + print("-" * 80) + + for filename in sorted(common_files): + print(f"\n### {filename}") + print() + + upstream_path = upstream_files[filename] + scroll_path = scroll_files[filename] + + # Extract structures + upstream_struct = extract_dashboard_structure(upstream_path) + scroll_struct = extract_dashboard_structure(scroll_path) + + # Find K8s patterns + k8s_patterns = find_kubernetes_patterns(scroll_path) + + print(f"Title: {upstream_struct['title']}") + print(f"Upstream UID: {upstream_struct['uid']}") + print(f"Scroll UID: {scroll_struct['uid']}") + print() + + print(f"Panels: Upstream={upstream_struct['panels_count']}, Scroll={scroll_struct['panels_count']}") + print(f"Variables: Upstream={upstream_struct['variables_count']}, Scroll={scroll_struct['variables_count']}") + print() + + # Compare panels + panel_comparison = compare_panels(upstream_struct['panels'], scroll_struct['panels']) + print(f"Panel Analysis:") + print(f" - Common panels: {panel_comparison['common_panels']}") + if panel_comparison['only_upstream']: + print(f" - Only in upstream: {panel_comparison['only_upstream']}") + if panel_comparison['only_scroll']: + print(f" - Only in scroll: 
{panel_comparison['only_scroll']}") + if panel_comparison['query_differences']: + print(f" - Panels with query differences: {len(panel_comparison['query_differences'])}") + print() + + # Compare variables + var_comparison = compare_variables(upstream_struct['variables'], scroll_struct['variables']) + print(f"Variable Analysis:") + print(f" - Common variables: {var_comparison['common_variables']}") + if var_comparison['only_upstream']: + print(f" - Only in upstream: {var_comparison['only_upstream']}") + if var_comparison['only_scroll']: + print(f" - Only in scroll: {var_comparison['only_scroll']}") + if var_comparison['modified']: + print(f" - Modified variables: {[v['name'] for v in var_comparison['modified']]}") + print() + + # Show K8s patterns + print("Kubernetes-specific patterns in scroll version:") + for key, values in k8s_patterns.items(): + if values: + print(f" - {key}: {values}") + print() + + print("-" * 80) + + # Case B: Only in upstream + only_upstream = set(upstream_files.keys()) - set(scroll_files.keys()) + print(f"\nCASE B: Dashboards only in upstream ({len(only_upstream)})") + print("-" * 80) + for filename in sorted(only_upstream): + upstream_path = upstream_files[filename] + upstream_struct = extract_dashboard_structure(upstream_path) + print(f"\n### {filename}") + print(f"Title: {upstream_struct['title']}") + print(f"Panels: {upstream_struct['panels_count']}") + print(f"Variables: {upstream_struct['variables_count']}") + print() + + # Case C: Only in scroll + only_scroll = set(scroll_files.keys()) - set(upstream_files.keys()) + print(f"\nCASE C: Dashboards only in scroll ({len(only_scroll)})") + print("-" * 80) + for filename in sorted(only_scroll): + scroll_path = scroll_files[filename] + scroll_struct = extract_dashboard_structure(scroll_path) + k8s_patterns = find_kubernetes_patterns(scroll_path) + + print(f"\n### {filename}") + print(f"Title: {scroll_struct['title']}") + print(f"Panels: {scroll_struct['panels_count']}") + print(f"Variables: {scroll_struct['variables_count']}") + print("\nKubernetes-specific patterns:") + for key, values in k8s_patterns.items(): + if values: + print(f" - {key}: {values}") + print() + +if __name__ == '__main__': + main()
diff --git reth/claude/tools/detailed_dashboard_analysis.py scroll-reth/claude/tools/detailed_dashboard_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..0b7daac43cb4c2ae79b6ceda3c4ab035f4340eed --- /dev/null +++ scroll-reth/claude/tools/detailed_dashboard_analysis.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 +""" +Detailed Grafana Dashboard Analysis +Provides specific migration and update plans for each dashboard +""" + +import json +from pathlib import Path +from typing import Dict, List, Set, Any +import re + +def load_json(filepath: Path) -> Dict: + """Load JSON dashboard file""" + with open(filepath, 'r') as f: + return json.load(f) + +def extract_all_queries(dashboard: Dict) -> List[Dict]: + """Extract all PromQL queries from dashboard""" + queries = [] + + def process_panel(panel: Dict, parent_title: str = None): + title = panel.get('title', 'Untitled') + if parent_title: + title = f"{parent_title} > {title}" + + for target in panel.get('targets', []): + if 'expr' in target: + queries.append({ + 'panel': title, + 'expr': target['expr'], + 'legendFormat': target.get('legendFormat', ''), + 'refId': target.get('refId', ''), + }) + + for panel in dashboard.get('panels', []): + if panel.get('type') == 'row': + row_title = panel.get('title') + for subpanel in panel.get('panels', []): + process_panel(subpanel, row_title) + else: + process_panel(panel) + + return queries + +def analyze_query_patterns(query: str) -> Dict[str, List[str]]: + """Analyze PromQL query for label patterns""" + patterns = { + 'label_filters': [], + 'variables': [], + 'functions': [], + 'metrics': [], + } + + # Extract label filters + label_matches = re.findall(r'(\w+)=~?"([^"]+)"', query) + patterns['label_filters'] = [(k, v) for k, v in label_matches] + + # Extract variables + var_matches = re.findall(r'\$(\w+)', query) + patterns['variables'] = list(set(var_matches)) + + # Extract metric names + metric_matches = re.findall(r'\b(reth_\w+|process_\w+|go_\w+|node_\w+)', query) + patterns['metrics'] = list(set(metric_matches)) + + # Extract functions + func_matches = re.findall(r'\b(rate|irate|increase|sum|avg|max|min|count|topk|bottomk)\s*\(', query) + patterns['functions'] = list(set(func_matches)) + + return patterns + +def compare_queries_detailed(upstream_queries: List[Dict], scroll_queries: List[Dict]) -> List[Dict]: + """Compare queries in detail""" + differences = [] + + # Create lookup by panel name + upstream_by_panel = {q['panel']: q for q in upstream_queries} + scroll_by_panel = {q['panel']: q for q in scroll_queries} + + for panel_name in set(upstream_by_panel.keys()) | set(scroll_by_panel.keys()): + up_query = upstream_by_panel.get(panel_name) + sc_query = scroll_by_panel.get(panel_name) + + if up_query and sc_query: + if up_query['expr'] != sc_query['expr']: + up_patterns = analyze_query_patterns(up_query['expr']) + sc_patterns = analyze_query_patterns(sc_query['expr']) + + differences.append({ + 'panel': panel_name, + 'status': 'modified', + 'upstream_query': up_query['expr'], + 'scroll_query': sc_query['expr'], + 'upstream_patterns': up_patterns, + 'scroll_patterns': sc_patterns, + 'added_labels': [l for l in sc_patterns['label_filters'] if l not in up_patterns['label_filters']], + 'removed_labels': [l for l in up_patterns['label_filters'] if l not in sc_patterns['label_filters']], + 'added_variables': [v for v in sc_patterns['variables'] if v not in up_patterns['variables']], + 'removed_variables': [v for v in up_patterns['variables'] if v not in 
sc_patterns['variables']], + }) + elif up_query and not sc_query: + differences.append({ + 'panel': panel_name, + 'status': 'only_upstream', + 'upstream_query': up_query['expr'], + }) + elif sc_query and not up_query: + differences.append({ + 'panel': panel_name, + 'status': 'only_scroll', + 'scroll_query': sc_query['expr'], + }) + + return differences + +def analyze_variables_detailed(upstream_vars: List[Dict], scroll_vars: List[Dict]) -> Dict: + """Analyze variable differences in detail""" + upstream_by_name = {v['name']: v for v in upstream_vars} + scroll_by_name = {v['name']: v for v in scroll_vars} + + analysis = { + 'added_in_scroll': [], + 'removed_in_scroll': [], + 'modified': [], + } + + for name, var in scroll_by_name.items(): + if name not in upstream_by_name: + analysis['added_in_scroll'].append(var) + elif upstream_by_name[name] != var: + analysis['modified'].append({ + 'name': name, + 'upstream': upstream_by_name[name], + 'scroll': var, + }) + + for name, var in upstream_by_name.items(): + if name not in scroll_by_name: + analysis['removed_in_scroll'].append(var) + + return analysis + +def generate_update_plan(filename: str, upstream_path: Path, scroll_path: Path) -> str: + """Generate detailed update plan for a dashboard""" + upstream = load_json(upstream_path) + scroll = load_json(scroll_path) + + up_queries = extract_all_queries(upstream) + sc_queries = extract_all_queries(scroll) + + query_diffs = compare_queries_detailed(up_queries, sc_queries) + + up_vars = upstream.get('templating', {}).get('list', []) + sc_vars = scroll.get('templating', {}).get('list', []) + + var_analysis = analyze_variables_detailed(up_vars, sc_vars) + + plan = [] + plan.append(f"## UPDATE PLAN: {filename}") + plan.append("=" * 80) + plan.append("") + + # Dashboard metadata + plan.append(f"**Dashboard**: {upstream.get('title')}") + plan.append(f"**Upstream UID**: {upstream.get('uid')}") + plan.append(f"**Scroll UID**: {scroll.get('uid')}") + plan.append("") + + # Variable analysis + plan.append("### VARIABLES") + plan.append("") + + if var_analysis['added_in_scroll']: + plan.append("**Kubernetes Variables Added in Scroll** (MUST PRESERVE):") + for var in var_analysis['added_in_scroll']: + plan.append(f" - `{var['name']}` (type: {var.get('type')})") + if var.get('query'): + plan.append(f" Query: `{var['query']}`") + plan.append("") + + if var_analysis['removed_in_scroll']: + plan.append("**Variables Removed in Scroll** (SHOULD ADD BACK FROM UPSTREAM):") + for var in var_analysis['removed_in_scroll']: + plan.append(f" - `{var['name']}` (type: {var.get('type')})") + if var.get('query'): + plan.append(f" Query: `{var['query']}`") + plan.append("") + + # Query analysis + plan.append("### QUERY ANALYSIS") + plan.append("") + + k8s_customizations = [d for d in query_diffs if d['status'] == 'modified' and d['added_labels']] + upstream_only_panels = [d for d in query_diffs if d['status'] == 'only_upstream'] + scroll_only_panels = [d for d in query_diffs if d['status'] == 'only_scroll'] + + if k8s_customizations: + plan.append(f"**Kubernetes Customizations** ({len(k8s_customizations)} panels):") + plan.append("") + for diff in k8s_customizations[:5]: # Show first 5 examples + plan.append(f"**Panel: {diff['panel']}**") + if diff['added_labels']: + plan.append(f" Added label filters: {diff['added_labels']}") + if diff['added_variables']: + plan.append(f" Added variables: {diff['added_variables']}") + plan.append("") + if len(k8s_customizations) > 5: + plan.append(f" ... 
and {len(k8s_customizations) - 5} more panels with customizations") + plan.append("") + + if upstream_only_panels: + plan.append(f"**Panels Only in Upstream** ({len(upstream_only_panels)} panels) - NEW FEATURES:") + for panel in upstream_only_panels: + plan.append(f" - {panel['panel']}") + plan.append("") + + if scroll_only_panels: + plan.append(f"**Panels Only in Scroll** ({len(scroll_only_panels)} panels) - CUSTOM ADDITIONS:") + for panel in scroll_only_panels: + plan.append(f" - {panel['panel']}") + plan.append("") + + # Update strategy + plan.append("### UPDATE STRATEGY") + plan.append("") + plan.append("**Step 1: Variable Reconciliation**") + + if var_analysis['added_in_scroll']: + plan.append(" - Keep Kubernetes variables from scroll version:") + for var in var_analysis['added_in_scroll']: + plan.append(f" * `{var['name']}`") + + if var_analysis['removed_in_scroll']: + plan.append(" - Add back missing upstream variables:") + for var in var_analysis['removed_in_scroll']: + plan.append(f" * `{var['name']}`") + plan.append("") + + plan.append("**Step 2: Panel Structure**") + plan.append(" - Use upstream panel structure as base (including new panels)") + if upstream_only_panels: + plan.append(f" - Add {len(upstream_only_panels)} new panels from upstream") + if scroll_only_panels: + plan.append(f" - Evaluate {len(scroll_only_panels)} scroll-only panels for retention") + plan.append("") + + plan.append("**Step 3: Query Migration**") + plan.append(" - For each query, apply Kubernetes label filters:") + + # Detect common K8s pattern + k8s_labels_to_add = set() + for diff in k8s_customizations: + for label, _ in diff['added_labels']: + k8s_labels_to_add.add(label) + + if k8s_labels_to_add: + plan.append(f" * Add label filters: {', '.join(sorted(k8s_labels_to_add))}") + + # Detect variable pattern changes + var_replacements = set() + for diff in k8s_customizations: + if diff['added_variables']: + for v in diff['added_variables']: + var_replacements.add(v) + + if var_replacements: + plan.append(f" * Add variable references: {', '.join(sorted([f'${v}' for v in var_replacements]))}") + plan.append("") + + plan.append("**Step 4: Testing Requirements**") + plan.append(" - Verify all queries work in Kubernetes environment") + plan.append(" - Check variable dropdowns populate correctly") + plan.append(" - Validate metric label selectors match Helm deployment") + plan.append("") + + # Specific example + if k8s_customizations: + plan.append("### EXAMPLE TRANSFORMATION") + plan.append("") + example = k8s_customizations[0] + plan.append(f"**Panel: {example['panel']}**") + plan.append("") + plan.append("**Upstream Query:**") + plan.append("```promql") + plan.append(example['upstream_query'][:500]) + plan.append("```") + plan.append("") + plan.append("**Scroll Query (with K8s filters):**") + plan.append("```promql") + plan.append(example['scroll_query'][:500]) + plan.append("```") + plan.append("") + plan.append("**Changes Applied:**") + if example['added_labels']: + plan.append(f" - Added label filters: {example['added_labels']}") + if example['added_variables']: + plan.append(f" - Added variables: {example['added_variables']}") + plan.append("") + + plan.append("=" * 80) + plan.append("") + + return "\n".join(plan) + +def main(): + upstream_dir = Path('etc/grafana/dashboards') + scroll_dir = Path('etc/grafana/scroll') + + # Find all dashboards + upstream_files = {f.name: f for f in upstream_dir.glob('*.json')} + scroll_files = {f.name: f for f in scroll_dir.glob('*.json')} + + common_files = 
set(upstream_files.keys()) & set(scroll_files.keys()) + + print("=" * 80) + print("DETAILED DASHBOARD ANALYSIS AND UPDATE PLANS") + print("=" * 80) + print() + + # Case A: Detailed analysis + for filename in sorted(common_files): + plan = generate_update_plan(filename, upstream_files[filename], scroll_files[filename]) + print(plan) + + # Case B: metrics-exporter.json + print("\n" + "=" * 80) + print("CASE B: UPSTREAM-ONLY DASHBOARDS") + print("=" * 80) + print() + + only_upstream = set(upstream_files.keys()) - set(scroll_files.keys()) + for filename in sorted(only_upstream): + upstream = load_json(upstream_files[filename]) + print(f"## {filename}") + print() + print(f"**Title**: {upstream.get('title')}") + print(f"**UID**: {upstream.get('uid')}") + print(f"**Panels**: {len(upstream.get('panels', []))}") + print(f"**Variables**: {len(upstream.get('templating', {}).get('list', []))}") + print() + print("**Recommendation**:") + + # Analyze if it's relevant + queries = extract_all_queries(upstream) + metrics = set() + for q in queries: + patterns = analyze_query_patterns(q['expr']) + metrics.update(patterns['metrics']) + + print(f" - Metrics used: {', '.join(sorted(metrics)[:5])}") + print(f" - Total unique metrics: {len(metrics)}") + print() + + if 'ethereum' in upstream.get('title', '').lower(): + print(" **Action**: Consider porting to scroll/ with K8s adaptations") + print(" **Reason**: Ethereum-specific metrics likely relevant for Scroll") + else: + print(" **Action**: Evaluate relevance for Scroll deployment") + print() + print(" **If porting:**") + print(" 1. Copy from upstream as base") + print(" 2. Add K8s variables (env, pod, service)") + print(" 3. Add label filters to all queries: pod=~\"$pod\", etc.") + print(" 4. Update datasource references if needed") + print() + + # Case C: scroll-only + print("\n" + "=" * 80) + print("CASE C: SCROLL-ONLY DASHBOARDS") + print("=" * 80) + print() + + only_scroll = set(scroll_files.keys()) - set(upstream_files.keys()) + for filename in sorted(only_scroll): + scroll = load_json(scroll_files[filename]) + print(f"## {filename}") + print() + print(f"**Title**: {scroll.get('title')}") + print(f"**UID**: {scroll.get('uid')}") + print(f"**Panels**: {len(scroll.get('panels', []))}") + print(f"**Variables**: {len(scroll.get('templating', {}).get('list', []))}") + print() + + queries = extract_all_queries(scroll) + metrics = set() + for q in queries: + patterns = analyze_query_patterns(q['expr']) + metrics.update(patterns['metrics']) + + print(f" - Metrics used: {', '.join(sorted(metrics))}") + print() + print("**Recommendation**:") + print(" - **Action**: Keep as scroll-specific custom dashboard") + print(" - **Reason**: Custom performance monitoring for Scroll deployment") + print(" - **Maintenance**: Update as needed for scroll-specific requirements") + print() + +if __name__ == '__main__': + main()
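The core of the detailed analysis is `analyze_query_patterns`, which pulls label filters, `$variables`, and reth/process/go/node metric names out of each PromQL expression with regular expressions. A hedged Rust equivalent of the variable and metric extraction, assuming the `regex` crate (the shipped tool is the Python above; the example expression is made up):

```rust
// Hedged Rust equivalent of the variable / metric-name extraction in
// `analyze_query_patterns`. The `regex` crate is assumed; the query is a placeholder.
use regex::Regex;
use std::collections::BTreeSet;

fn query_variables_and_metrics(expr: &str) -> (BTreeSet<String>, BTreeSet<String>) {
    let var_re = Regex::new(r"\$(\w+)").unwrap();
    let metric_re = Regex::new(r"\b(reth_\w+|process_\w+|go_\w+|node_\w+)").unwrap();

    let variables = var_re
        .captures_iter(expr)
        .map(|c| c[1].to_string())
        .collect();
    let metrics = metric_re
        .captures_iter(expr)
        .map(|c| c[1].to_string())
        .collect();
    (variables, metrics)
}

fn main() {
    // Placeholder expression for illustration only.
    let expr = r#"rate(reth_example_metric{instance=~"$instance"}[$__rate_interval])"#;
    let (vars, metrics) = query_variables_and_metrics(expr);
    println!("variables={vars:?} metrics={metrics:?}");
}
```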
diff --git reth/claude/tools/sync_dashboard.py scroll-reth/claude/tools/sync_dashboard.py new file mode 100644 index 0000000000000000000000000000000000000000..89a4ac51612eebfdcbd3e67f4937471e94cce9d3 --- /dev/null +++ scroll-reth/claude/tools/sync_dashboard.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 +""" +Grafana Dashboard K8s Transformation Script +Syncs upstream dashboard structure with Scroll's Kubernetes customizations +""" + +import json +import re +import sys +from pathlib import Path +from typing import Dict, Any, List +from copy import deepcopy + +def add_k8s_variables(dashboard: Dict, preserve_uid: str = None) -> Dict: + """Add standard K8s variables to dashboard templating (env and service only)""" + k8s_vars = [ + { + "current": { + "text": "mainnet", + "value": "mainnet" + }, + "definition": "label_values(env)", + "name": "env", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(sepolia|mainnet)-eks.*", + "type": "query" + }, + { + "current": { + "text": "l1reth-el-0", + "value": "l1reth-el-0" + }, + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "name": "service", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(l[1|2]reth.*)", + "type": "query" + } + ] + + if 'templating' not in dashboard: + dashboard['templating'] = {'list': []} + + # Preserve dashboard-specific variables (like interval for reth-state-growth) + existing_vars = dashboard.get('templating', {}).get('list', []) + preserved_vars = [v for v in existing_vars if v.get('name') in ['interval']] + + # Replace with K8s variables + preserved dashboard-specific variables + dashboard['templating']['list'] = k8s_vars + preserved_vars + + # Preserve scroll UID if provided + if preserve_uid: + dashboard['uid'] = preserve_uid + + return dashboard + +def transform_query(query: str) -> str: + """ + Transform PromQL query to use K8s labels (service and namespace, no pod) + Uses exact match (=) not regex (=~) for precise service filtering + Includes namespace filter to prevent cross-environment aggregation + This enables data continuity when pods are replaced + """ + if not query or not isinstance(query, str): + return query + + # Pattern 1: $instance_label="$instance" or $instance_label=~"$instance" + query = re.sub( + r'\$instance_label\s*=~?\s*["\']?\$instance["\']?', + 'service="$service", namespace="$env"', + query + ) + + # Pattern 2: instance="$instance" or instance=~"$instance" (direct usage) + query = re.sub( + r'instance\s*=~?\s*["\']?\$instance["\']?', + 'service="$service", namespace="$env"', + query + ) + + # Pattern 3: {$instance_label="$instance"} at start of label set + query = re.sub( + r'\{\s*\$instance_label\s*=~?\s*["\']?\$instance["\']?\s*,', + '{service="$service", namespace="$env",', + query + ) + + # Pattern 4: {instance="$instance"} at start of label set + query = re.sub( + r'\{\s*instance\s*=~?\s*["\']?\$instance["\']?\s*,', + '{service="$service", namespace="$env",', + query + ) + + # Pattern 5: , $instance_label="$instance"} at end of label set + query = re.sub( + r',\s*\$instance_label\s*=~?\s*["\']?\$instance["\']?\s*\}', + ', service="$service", namespace="$env"}', + query + ) + + # Pattern 6: , instance="$instance"} at end of label set + query = re.sub( + r',\s*instance\s*=~?\s*["\']?\$instance["\']?\s*\}', 
+ ', service="$service", namespace="$env"}', + query + ) + + # Pattern 7: {$instance_label="$instance"} as only label + query = re.sub( + r'\{\s*\$instance_label\s*=~?\s*["\']?\$instance["\']?\s*\}', + '{service="$service", namespace="$env"}', + query + ) + + # Pattern 8: {instance="$instance"} as only label + query = re.sub( + r'\{\s*instance\s*=~?\s*["\']?\$instance["\']?\s*\}', + '{service="$service", namespace="$env"}', + query + ) + + return query + +def transform_target(target: Dict) -> Dict: + """Transform a single query target""" + if 'expr' in target and target['expr']: + target['expr'] = transform_query(target['expr']) + return target + +def set_hardcoded_datasource(obj: Any) -> Any: + """Replace all datasource references with hardcoded UID""" + if isinstance(obj, dict): + # If this is a datasource object, replace with hardcoded UID + if 'datasource' in obj: + obj['datasource'] = { + "type": "prometheus", + "uid": "o59qe-zVz" + } + # Recursively process all dict values + for key, value in obj.items(): + obj[key] = set_hardcoded_datasource(value) + elif isinstance(obj, list): + # Recursively process all list items + return [set_hardcoded_datasource(item) for item in obj] + + return obj + +def transform_panel(panel: Dict) -> Dict: + """Transform all queries in a panel recursively""" + # Transform targets in this panel + if 'targets' in panel: + panel['targets'] = [transform_target(t) for t in panel['targets']] + + # Set hardcoded datasource for panel and all nested objects + panel = set_hardcoded_datasource(panel) + + # Recursively handle nested panels (rows with collapsed panels) + if 'panels' in panel: + panel['panels'] = [transform_panel(p) for p in panel['panels']] + + return panel + +def sync_dashboard(upstream_path: str, scroll_uid: str = None, output_path: str = None) -> Dict: + """ + Main sync function: takes upstream dashboard and applies K8s transformations + + Args: + upstream_path: Path to upstream dashboard JSON + scroll_uid: UID to preserve from scroll version (optional) + output_path: Where to save the result (optional, defaults to print) + + Returns: + Transformed dashboard dict + """ + # Load upstream dashboard + with open(upstream_path, 'r') as f: + dashboard = json.load(f) + + print(f"Processing: {dashboard.get('title', 'Unknown')}") + print(f" Upstream panels: {len(dashboard.get('panels', []))}") + + # Add K8s variables + dashboard = add_k8s_variables(dashboard, preserve_uid=scroll_uid) + + # Transform all panels + panel_count = 0 + target_count = 0 + + transformed_panels = [] + for panel in dashboard.get('panels', []): + panel = transform_panel(panel) + transformed_panels.append(panel) + panel_count += 1 + if 'targets' in panel: + target_count += len(panel['targets']) + if 'panels' in panel: # Row with nested panels + for subpanel in panel['panels']: + panel_count += 1 + if 'targets' in subpanel: + target_count += len(subpanel['targets']) + + dashboard['panels'] = transformed_panels + + print(f" Transformed panels: {panel_count}") + print(f" Transformed queries: {target_count}") + print(f" Variables: {len(dashboard['templating']['list'])}") + + # Save if output path provided + if output_path: + with open(output_path, 'w') as f: + json.dump(dashboard, f, indent=2) + print(f" ✓ Saved to: {output_path}") + + return dashboard + +def get_scroll_uid(scroll_path: str) -> str: + """Extract UID from existing scroll dashboard""" + try: + with open(scroll_path, 'r') as f: + data = json.load(f) + return data.get('uid') + except: + return None + +def main(): + """Process 
all dashboards""" + upstream_dir = Path('etc/grafana/dashboards') + scroll_dir = Path('etc/grafana/scroll') + + # Dashboards to sync + dashboards = [ + 'overview.json', + 'reth-discovery.json', + 'reth-mempool.json', + 'reth-state-growth.json', + ] + + print("=" * 80) + print("GRAFANA DASHBOARD SYNCHRONIZATION") + print("=" * 80) + print() + + for filename in dashboards: + upstream_path = upstream_dir / filename + scroll_path = scroll_dir / filename + output_path = scroll_dir / filename + + # Get scroll UID to preserve + scroll_uid = get_scroll_uid(scroll_path) if scroll_path.exists() else None + + print(f"\n{'=' * 80}") + print(f"Dashboard: {filename}") + if scroll_uid: + print(f" Preserving UID: {scroll_uid}") + print(f"{'=' * 80}") + + # Sync and save + sync_dashboard(str(upstream_path), scroll_uid, str(output_path)) + + print("\n" + "=" * 80) + print("SYNCHRONIZATION COMPLETE") + print("=" * 80) + print("\nNext steps:") + print(" 1. Review the updated dashboards") + print(" 2. Validate JSON syntax") + print(" 3. Test in Grafana") + print(" 4. Commit changes") + +if __name__ == '__main__': + main()
diff --git reth/crates/chain-state/src/in_memory.rs scroll-reth/crates/chain-state/src/in_memory.rs index cd194db81e3d6976efb5f9961f9a6d84eb10d4a7..31ed8bda5cd961b790f456791a5a6aea63d8ae51 100644 --- reth/crates/chain-state/src/in_memory.rs +++ scroll-reth/crates/chain-state/src/in_memory.rs @@ -570,7 +570,7 @@ pub struct BlockState<N: NodePrimitives = EthPrimitives> { /// The executed block that determines the state after this block has been executed. block: ExecutedBlockWithTrieUpdates<N>, /// The block's parent block if it exists. - parent: Option<Arc<BlockState<N>>>, + parent: Option<Arc<Self>>, }   impl<N: NodePrimitives> BlockState<N> { @@ -1380,8 +1380,7 @@ #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); - let chain: Vec<_> = state.canonical_chain().collect(); - assert!(chain.is_empty()); + assert!(state.canonical_chain().next().is_none()); }   #[test]
diff --git reth/crates/chainspec/src/api.rs scroll-reth/crates/chainspec/src/api.rs index 80327d38b6d713663053465c7e6ddf687f6e7160..70fe3a23daaa493a229d7332a4a04c58a2a91d3e 100644 --- reth/crates/chainspec/src/api.rs +++ scroll-reth/crates/chainspec/src/api.rs @@ -1,7 +1,6 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; @@ -75,8 +74,8 @@ )) } }   -impl EthChainSpec for ChainSpec { - type Header = Header; +impl<H: BlockHeader> EthChainSpec for ChainSpec<H> { + type Header = H;   fn chain(&self) -> Chain { self.chain @@ -136,3 +135,13 @@ fn final_paris_total_difficulty(&self) -> Option<U256> { self.paris_block_and_final_difficulty.map(|(_, final_difficulty)| final_difficulty) } } + +/// Trait representing the current capacities of the fork. +pub trait EthereumCapabilities: EthereumHardforks { + /// Returns true if the withdrawals are active. + fn withdrawals_active(&self, ts: u64) -> bool { + self.is_shanghai_active_at_timestamp(ts) + } +} + +impl EthereumCapabilities for ChainSpec {}
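Two things change in `api.rs`: `EthChainSpec` is now implemented for any `ChainSpec<H>` whose header implements `BlockHeader`, and a new `EthereumCapabilities` trait exposes fork-derived capabilities (currently just `withdrawals_active`) so callers no longer have to name a specific Ethereum hardfork. A hedged sketch of how a consumer might lean on both; the function names and import paths below are illustrative:

```rust
// Hedged sketch (illustrative names and paths): consumers can stay generic over the header
// type, and gate withdrawal handling on the capability instead of calling
// `is_shanghai_active_at_timestamp` directly, which lets an alternative chain spec
// (e.g. Scroll's) override the default.
use reth_chainspec::{ChainSpec, EthChainSpec, EthereumCapabilities};
use reth_primitives_traits::BlockHeader;

// Works for any header type now that `EthChainSpec` is implemented for `ChainSpec<H>`.
fn describe_chain<H: BlockHeader>(spec: &ChainSpec<H>) {
    println!("chain: {}", spec.chain());
}

// Callers ask for the capability, not the hardfork.
fn should_process_withdrawals<Spec: EthereumCapabilities>(spec: &Spec, ts: u64) -> bool {
    spec.withdrawals_active(ts)
}
```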
diff --git reth/crates/chainspec/src/lib.rs scroll-reth/crates/chainspec/src/lib.rs index 96db768a1c2e2ebad8a28074e4c4db8b257f880d..45fd0dcba1ec6aeefab78cf2fae78da0bdd5370b 100644 --- reth/crates/chainspec/src/lib.rs +++ scroll-reth/crates/chainspec/src/lib.rs @@ -25,7 +25,7 @@ pub use alloy_chains::{Chain, ChainKind, NamedChain}; /// Re-export for convenience pub use reth_ethereum_forks::*;   -pub use api::EthChainSpec; +pub use api::{EthChainSpec, EthereumCapabilities}; pub use info::ChainInfo; #[cfg(any(test, feature = "test-utils"))] pub use spec::test_fork_ids;
diff --git reth/crates/chainspec/src/spec.rs scroll-reth/crates/chainspec/src/spec.rs index 089b6c1c6c93558582f4da5a68720f6ea6070b58..c788b3bded89f1fbfc9805180a5f9c0aec0ac618 100644 --- reth/crates/chainspec/src/spec.rs +++ scroll-reth/crates/chainspec/src/spec.rs @@ -3,7 +3,7 @@ use alloy_evm::eth::spec::EthExecutorSpec;   use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, mainnet, sepolia, EthChainSpec, + holesky, hoodi, sepolia, EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -28,8 +28,8 @@ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, }; use reth_network_peers::{ - holesky_nodes, hoodi_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, - NodeRecord, + holesky_nodes, hoodi_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, scroll_nodes, + scroll_sepolia_nodes, sepolia_nodes, NodeRecord, }; use reth_primitives_traits::{sync::LazyLock, BlockHeader, SealedHeader};   @@ -108,10 +108,7 @@ // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT, - blob_params: BlobScheduleBlobParams::default().with_scheduled([ - (mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()), - (mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()), - ]), + blob_params: BlobScheduleBlobParams::default(), }; spec.genesis.config.dao_fork_support = true; spec.into() @@ -266,7 +263,7 @@ /// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism. 
#[derive(Clone, Debug, PartialEq, Eq, From)] pub struct ForkBaseFeeParams(Vec<(Box<dyn Hardfork>, BaseFeeParams)>);   -impl core::ops::Deref for ChainSpec { +impl<H: BlockHeader> core::ops::Deref for ChainSpec<H> { type Target = ChainHardforks;   fn deref(&self) -> &Self::Target { @@ -616,9 +613,11 @@ C::Holesky => Some(holesky_nodes()), C::Hoodi => Some(hoodi_nodes()), // opstack uses the same bootnodes for all chains: <https://github.com/paradigmxyz/reth/issues/14603> C::Base | C::Optimism | C::Unichain | C::World => Some(op_nodes()), + C::Scroll => Some(scroll_nodes()), C::OptimismSepolia | C::BaseSepolia | C::UnichainSepolia | C::WorldSepolia => { Some(op_testnet_nodes()) } + C::ScrollSepolia => Some(scroll_sepolia_nodes()),   // fallback for optimism chains chain if chain.is_optimism() && chain.is_testnet() => Some(op_testnet_nodes()), @@ -1033,7 +1032,7 @@ } } }   -impl EthExecutorSpec for ChainSpec { +impl<H: BlockHeader> EthExecutorSpec for ChainSpec<H> { fn deposit_contract_address(&self) -> Option<Address> { self.deposit_contract.map(|deposit_contract| deposit_contract.address) } @@ -1129,10 +1128,7 @@ - Paris @58750000000000000000000 (network is known to be merged) Post-merge hard forks (timestamp based): - Shanghai @1681338455 - Cancun @1710338135 -- Prague @1746612311 -- Osaka @1764798551 -- Bpo1 @1765978199 -- Bpo2 @1767747671" +- Prague @1746612311" ); }   @@ -1376,10 +1372,7 @@ ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, ), ( EthereumHardfork::Prague, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1523,22 +1516,12 @@ ), // First Prague block ( Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), - // Osaka block + // Future Prague block ( - Head { - number: 20000002, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000002, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1847,22 +1830,11 @@ ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, ), // First Prague block ( Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, - ), - // Osaka block + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + ), // Future Prague block ( - Head { - number: 20000004, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000004, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -2519,8 +2491,10 @@ }   #[test] fn latest_eth_mainnet_fork_id() { - // BPO2 - assert_eq!(ForkId { hash: ForkHash(hex!("0xfd414558")), next: 0 }, MAINNET.latest_fork_id()) + assert_eq!( + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + MAINNET.latest_fork_id() + ) }   #[test]
diff --git reth/crates/cli/commands/src/common.rs scroll-reth/crates/cli/commands/src/common.rs index 1ceba8f57dab462309fe531ec318ab4720b68fcc..25f32f63a2bc5a57d26b5f910975cffb3bd4b49c 100644 --- reth/crates/cli/commands/src/common.rs +++ scroll-reth/crates/cli/commands/src/common.rs @@ -24,7 +24,7 @@ use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, }; -use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; +use reth_stages::{sets::DefaultStages, Pipeline}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -126,7 +126,6 @@ ) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> where C: ChainSpecParser<ChainSpec = N::ChainSpec>, { - let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::new( @@ -137,9 +136,8 @@ ) .with_prune_modes(prune_modes.clone());   // Check for consistency between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? + if let Some(unwind_target) = + factory.static_file_provider().check_consistency(&factory.provider()?)? { if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); @@ -150,7 +148,7 @@ // Highly unlikely to happen, and given its destructive nature, it's better to panic // instead. assert_ne!( unwind_target, - PipelineTarget::Unwind(0), + 0, "A static file <> database inconsistency was found that would trigger an unwind to block 0" );   @@ -175,7 +173,7 @@ .build(factory.clone(), StaticFileProducer::new(factory.clone(), prune_modes));   // Move all applicable data from database to static files. pipeline.move_to_static_files()?; - pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?; + pipeline.unwind(unwind_target, None)?; }   Ok(factory)
diff --git reth/crates/cli/commands/src/db/get.rs scroll-reth/crates/cli/commands/src/db/get.rs index 6214df0ec98e564618b7aa7f6edc83056747289f..9d06a35dcaa3483b5a0b1c5a922bd2cba10535a7 100644 --- reth/crates/cli/commands/src/db/get.rs +++ scroll-reth/crates/cli/commands/src/db/get.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -66,9 +65,10 @@ table.view(&GetValueViewer { tool, key, subkey, raw })? } Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { - StaticFileSegment::Headers => { - (table_key::<tables::Headers>(&key)?, <HeaderWithHashMask<Header>>::MASK) - } + StaticFileSegment::Headers => ( + table_key::<tables::Headers>(&key)?, + <HeaderWithHashMask<HeaderTy<N>>>::MASK, + ), StaticFileSegment::Transactions => { (table_key::<tables::Transactions>(&key)?, <TransactionMask<TxTy<N>>>::MASK) }
diff --git reth/crates/cli/commands/src/db/list.rs scroll-reth/crates/cli/commands/src/db/list.rs index 9288a56a86c9e5161b59f811a3acbd88fc9c7f74..2540e77c111301436e9e0f067eb1ae33dc0dc207 100644 --- reth/crates/cli/commands/src/db/list.rs +++ scroll-reth/crates/cli/commands/src/db/list.rs @@ -97,7 +97,7 @@ fn view<T: Table>(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; + let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx {
diff --git reth/crates/cli/commands/src/db/repair_trie.rs scroll-reth/crates/cli/commands/src/db/repair_trie.rs index e7ee8d7977cbd85f87f80493533a56398bf00c94..e5b7db0e2f0dbe26aab07c4d3777bdfb6ea25e07 100644 --- reth/crates/cli/commands/src/db/repair_trie.rs +++ scroll-reth/crates/cli/commands/src/db/repair_trie.rs @@ -179,8 +179,17 @@ } Output::StorageWrong { account, path, expected: node, .. } | Output::StorageMissing(account, path, node) => { // Wrong/missing storage node value, upsert it + // (We can't just use `upsert` method with a dup cursor, it's not properly + // supported) let nibbles = StoredNibblesSubKey(path); - let entry = StorageTrieEntry { nibbles, node }; + let entry = StorageTrieEntry { nibbles: nibbles.clone(), node }; + if storage_trie_cursor + .seek_by_key_subkey(account, nibbles.clone())? + .filter(|v| v.nibbles == nibbles) + .is_some() + { + storage_trie_cursor.delete_current()?; + } storage_trie_cursor.upsert(account, &entry)?; } Output::Progress(path) => {
diff --git reth/crates/cli/commands/src/node.rs scroll-reth/crates/cli/commands/src/node.rs index 7e1ba97fb91a546f4a1ed25d5c569b5054cb45f5..c66ac1443a2b53aec52f78d129f2b640a99a6d79 100644 --- reth/crates/cli/commands/src/node.rs +++ scroll-reth/crates/cli/commands/src/node.rs @@ -5,18 +5,17 @@ use clap::{value_parser, Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_cli_util::parse_socket_address; use reth_db::init_db; use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs, - PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, }; -use std::{ffi::OsString, fmt, net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ffi::OsString, fmt, path::PathBuf, sync::Arc};   /// Start the node #[derive(Debug, Parser)] @@ -39,11 +38,9 @@ required = false, )] pub chain: Arc<C::ChainSpec>,   - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] - pub metrics: Option<SocketAddr>, + /// Prometheus metrics configuration. + #[command(flatten)] + pub metrics: MetricArgs,   /// Add a new instance of a node. /// @@ -225,7 +222,7 @@ use super::*; use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::{ - net::{IpAddr, Ipv4Addr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::Path, };   @@ -237,18 +234,29 @@ assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp); }   #[test] - fn parse_common_node_command_chain_args() { + fn parse_rommon_node_command_chain_args() { for chain in SUPPORTED_CHAINS { - let args: NodeCommand<EthereumChainSpecParser> = - NodeCommand::parse_from(["reth", "--chain", chain]); + let args: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from([ + "reth", + "--chain", + chain, + "--builder.gaslimit", + "10000000", + ]); assert_eq!(args.chain.chain, chain.parse::<reth_chainspec::Chain>().unwrap()); } }   #[test] fn parse_discovery_addr() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap(); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--discovery.addr", + "127.0.0.1", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); assert_eq!(cmd.network.discovery.addr, IpAddr::V4(Ipv4Addr::LOCALHOST)); }   @@ -260,6 +268,8 @@ "--discovery.addr", "127.0.0.1", "--addr", "127.0.0.1", + "--builder.gaslimit", + "10000000", ]) .unwrap(); assert_eq!(cmd.network.discovery.addr, IpAddr::V4(Ipv4Addr::LOCALHOST)); @@ -268,46 +278,92 @@ }   #[test] fn parse_discovery_port() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300"]).unwrap(); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--discovery.port", + "300", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); assert_eq!(cmd.network.discovery.port, 300); }   #[test] fn parse_port() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", 
"--discovery.port", "300", "--port", "99"]) - .unwrap(); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--discovery.port", + "300", + "--port", + "99", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); assert_eq!(cmd.network.discovery.port, 300); assert_eq!(cmd.network.port, 99); }   #[test] fn parse_metrics_port() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--metrics", + "9001", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + );   - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--metrics", + ":9001", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + );   - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--metrics", "localhost:9001"]).unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--metrics", + "localhost:9001", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); }   #[test] fn parse_config_path() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap(); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--config", + "my/path/to/reth.toml", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); let config_path = cmd.config.unwrap_or_else(|| data_dir.config()); assert_eq!(config_path, Path::new("my/path/to/reth.toml"));   let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth"]).unwrap(); + NodeCommand::try_parse_args_from(["reth", "--builder.gaslimit", "10000000"]).unwrap();   // always store reth.toml in the data dir, not the chain specific data dir let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain); @@ -319,15 +375,21 @@ #[test] fn parse_db_path() { let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth"]).unwrap(); + NodeCommand::try_parse_args_from(["reth", "--builder.gaslimit", "10000000"]).unwrap(); let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);   let db_path = data_dir.db(); let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]); assert!(db_path.ends_with(end), "{:?}", cmd.config);   - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap(); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([ + "reth", + "--datadir", + 
"my/custom/path", + "--builder.gaslimit", + "10000000", + ]) + .unwrap(); let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);   let db_path = data_dir.db(); @@ -336,7 +398,8 @@ }   #[test] fn parse_instance() { - let mut cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from(["reth"]); + let mut cmd: NodeCommand<EthereumChainSpecParser> = + NodeCommand::parse_from(["reth", "--builder.gaslimit", "10000000"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT; // check rpc port numbers @@ -347,7 +410,7 @@ // check network listening port number assert_eq!(cmd.network.port, 30303);   let mut cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::parse_from(["reth", "--instance", "2"]); + NodeCommand::parse_from(["reth", "--instance", "2", "--builder.gaslimit", "10000000"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT + 2 - 1; // check rpc port numbers @@ -358,7 +421,7 @@ // check network listening port number assert_eq!(cmd.network.port, 30304);   let mut cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::parse_from(["reth", "--instance", "3"]); + NodeCommand::parse_from(["reth", "--instance", "3", "--builder.gaslimit", "10000000"]); cmd.rpc.adjust_instance_ports(cmd.instance); cmd.network.port = DEFAULT_DISCOVERY_PORT + 3 - 1; // check rpc port numbers @@ -371,8 +434,12 @@ }   #[test] fn parse_with_unused_ports() { - let cmd: NodeCommand<EthereumChainSpecParser> = - NodeCommand::parse_from(["reth", "--with-unused-ports"]); + let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from([ + "reth", + "--with-unused-ports", + "--builder.gaslimit", + "10000000", + ]); assert!(cmd.with_unused_ports); }   @@ -383,6 +450,8 @@ "reth", "--with-unused-ports", "--instance", "2", + "--builder.gaslimit", + "10000000", ]) .unwrap_err(); assert_eq!(err.kind(), clap::error::ErrorKind::ArgumentConflict); @@ -390,7 +459,8 @@ }   #[test] fn with_unused_ports_check_zero() { - let mut cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from(["reth"]); + let mut cmd: NodeCommand<EthereumChainSpecParser> = + NodeCommand::parse_from(["reth", "--builder.gaslimit", "10000000"]); cmd.rpc = cmd.rpc.with_unused_ports(); cmd.network = cmd.network.with_unused_ports();
diff --git reth/crates/cli/commands/src/p2p/mod.rs scroll-reth/crates/cli/commands/src/p2p/mod.rs index 861fd836e7637200310228b55dcb724b3d1651e7..792d4533856813b1751a2beecec2b7406f55cb90 100644 --- reth/crates/cli/commands/src/p2p/mod.rs +++ scroll-reth/crates/cli/commands/src/p2p/mod.rs @@ -38,9 +38,9 @@ let backoff = args.backoff();   let header = (move || get_single_header(fetch_client.clone(), id)) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; - println!("Successfully downloaded header: {header:?}"); + tracing::info!(target: "reth::cli", ?header, "Successfully downloaded header"); }   Subcommands::Body { args, id } => { @@ -51,13 +51,13 @@ let hash = match id { BlockHashOrNumber::Hash(hash) => hash, BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); + tracing::info!(target: "reth::cli", "Block number provided. Downloading header first..."); let client = fetch_client.clone(); let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(number)) }) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; header.hash() } @@ -67,7 +67,7 @@ let client = fetch_client.clone(); client.get_block_bodies(vec![hash]) }) .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting block. Retrying...")) .await? .split(); if result.len() != 1 { @@ -77,7 +77,7 @@ result.len() ) } let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") + tracing::info!(target: "reth::cli", ?body, "Successfully downloaded body") } Subcommands::Rlpx(command) => { command.execute().await?;
diff --git reth/crates/cli/commands/src/stage/dump/merkle.rs scroll-reth/crates/cli/commands/src/stage/dump/merkle.rs index ee7564f7cb23ecbb512fed2a0ebeef24e937d34e..f7c85c89e24d608f26a67aeb9340a7a28357e20d 100644 --- reth/crates/cli/commands/src/stage/dump/merkle.rs +++ scroll-reth/crates/cli/commands/src/stage/dump/merkle.rs @@ -4,7 +4,7 @@ use super::setup; use alloy_primitives::BlockNumber; use eyre::Result; use reth_config::config::EtlConfig; -use reth_consensus::{ConsensusError, FullConsensus}; +use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus}; use reth_db::DatabaseEnv; use reth_db_api::{database::Database, table::TableImporter, tables}; use reth_db_common::DbTool; @@ -92,9 +92,10 @@ let execute_input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) };   // Unwind hashes all the way to FROM + StorageHashingStage::default().unwind(&provider, unwind)?; AccountHashingStage::default().unwind(&provider, unwind)?; - MerkleStage::default_unwind().unwind(&provider, unwind)?; + MerkleStage::<N::Primitives>::new_unwind(NoopConsensus::arc()).unwind(&provider, unwind)?;   // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( @@ -155,10 +156,11 @@ { info!(target: "reth::cli", "Executing stage."); let provider = output_provider_factory.database_provider_rw()?;   - let mut stage = MerkleStage::Execution { + let mut stage = MerkleStage::<N::Primitives>::Execution { // Forces updating the root instead of calculating from scratch rebuild_threshold: u64::MAX, incremental_threshold: u64::MAX, + consensus: NoopConsensus::arc(), };   loop {
diff --git reth/crates/cli/commands/src/stage/run.rs scroll-reth/crates/cli/commands/src/stage/run.rs index 4e577af06be02372874092e42a991ce53cce654e..83363184a827e8a11baec80e4e8ac9e2aecfbe46 100644 --- reth/crates/cli/commands/src/stage/run.rs +++ scroll-reth/crates/cli/commands/src/stage/run.rs @@ -293,13 +293,17 @@ etl_config, )), None, ), - StageEnum::Merkle => ( - Box::new(MerkleStage::new_execution( - config.stages.merkle.rebuild_threshold, - config.stages.merkle.incremental_threshold, - )), - Some(Box::new(MerkleStage::default_unwind())), - ), + StageEnum::Merkle => { + let consensus = Arc::new(components.consensus().clone()); + ( + Box::new(MerkleStage::<N::Primitives>::new_execution( + config.stages.merkle.rebuild_threshold, + config.stages.merkle.incremental_threshold, + consensus.clone(), + )), + Some(Box::new(MerkleStage::<N::Primitives>::new_unwind(consensus))), + ) + } StageEnum::AccountHistory => ( Box::new(IndexAccountHistoryStage::new( config.stages.index_account_history,
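Both the dump tooling and `stage run` now hand a consensus handle to the Merkle stage: `new_execution` gained a consensus parameter and `default_unwind()` became `new_unwind(consensus)`, presumably so the stage can route its state-root comparison through the `validate_state_root` hook added to `reth-consensus` (see the consensus diff below). A hedged construction sketch; the thresholds are placeholders and import paths are assumed:

```rust
// Hedged sketch of the new constructor shapes. Thresholds are placeholder values (the CLI
// takes them from `config.stages.merkle`), and import paths are assumed. NoopConsensus is
// used where no real state-root validation is wanted, as in the dump tooling above.
use reth_consensus::noop::NoopConsensus;
use reth_ethereum_primitives::EthPrimitives;
use reth_stages::stages::MerkleStage;

fn build_merkle_stages() -> (MerkleStage<EthPrimitives>, MerkleStage<EthPrimitives>) {
    let consensus = NoopConsensus::arc();
    let execution =
        MerkleStage::<EthPrimitives>::new_execution(5_000, 7_000, consensus.clone());
    let unwind = MerkleStage::<EthPrimitives>::new_unwind(consensus);
    (execution, unwind)
}
```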
diff --git reth/crates/cli/commands/src/test_vectors/compact.rs scroll-reth/crates/cli/commands/src/test_vectors/compact.rs index ca88c131ff6bac4bf5d01143b7071e7d69e78328..f4636f5f83b5e814452b8f3b285526105fbac45a 100644 --- reth/crates/cli/commands/src/test_vectors/compact.rs +++ scroll-reth/crates/cli/commands/src/test_vectors/compact.rs @@ -283,7 +283,7 @@ pub fn type_name<T>() -> String { // With alloy type transition <https://github.com/paradigmxyz/reth/pull/15768> the types are renamed, we map them here to the original name so that test vector files remain consistent let name = std::any::type_name::<T>(); match name { - "alloy_consensus::transaction::typed::EthereumTypedTransaction<alloy_consensus::transaction::eip4844::TxEip4844>" => "Transaction".to_string(), + "alloy_consensus::transaction::envelope::EthereumTypedTransaction<alloy_consensus::transaction::eip4844::TxEip4844>" => "Transaction".to_string(), "alloy_consensus::transaction::envelope::EthereumTxEnvelope<alloy_consensus::transaction::eip4844::TxEip4844>" => "TransactionSigned".to_string(), name => { name.split("::").last().unwrap_or(std::any::type_name::<T>()).to_string()
diff --git reth/crates/cli/commands/src/test_vectors/tables.rs scroll-reth/crates/cli/commands/src/test_vectors/tables.rs index 1bbd2604f975ed9364a15fb0a8a6fb94da046052..ef34e5b5e84463775e634b6b7a0fa9f29592ed6d 100644 --- reth/crates/cli/commands/src/test_vectors/tables.rs +++ scroll-reth/crates/cli/commands/src/test_vectors/tables.rs @@ -54,7 +54,7 @@ for table in tables { match table.as_str() { $( stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); + tracing::info!(target: "reth::cli", "Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME);   generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); },
diff --git reth/crates/cli/util/src/sigsegv_handler.rs scroll-reth/crates/cli/util/src/sigsegv_handler.rs index dabbf866cee168f25ff84bd06ee63d2901fba097..eeca446b72a7c9b81b199910d270e801cbe3a7a6 100644 --- reth/crates/cli/util/src/sigsegv_handler.rs +++ scroll-reth/crates/cli/util/src/sigsegv_handler.rs @@ -126,7 +126,7 @@ alt_stack.ss_size = alt_stack_size; libc::sigaltstack(&raw const alt_stack, ptr::null_mut());   let mut sa: libc::sigaction = mem::zeroed(); - sa.sa_sigaction = print_stack_trace as libc::sighandler_t; + sa.sa_sigaction = print_stack_trace as *const () as libc::sighandler_t; sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK; libc::sigemptyset(&raw mut sa.sa_mask); libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
diff --git reth/crates/consensus/consensus/src/lib.rs scroll-reth/crates/consensus/consensus/src/lib.rs index a267dfe902f200e153c0f208f86593470ccf9f3b..669eb19d1a40604d7242a316655936ee2a3fff53 100644 --- reth/crates/consensus/consensus/src/lib.rs +++ scroll-reth/crates/consensus/consensus/src/lib.rs @@ -18,7 +18,7 @@ use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, transaction::error::InvalidTransactionError, - Block, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock, + Block, BlockHeader, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, };   @@ -62,8 +62,9 @@ /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". + /// See the Yellow Paper sections 4.4.2 "Holistic Validity", 4.4.4 "Block Header Validity". + /// Note: Ommer Validation (previously section 11.1) has been deprecated since the Paris hard + /// fork transition to proof of stake. /// /// **This should not be called for the genesis block**. /// @@ -73,7 +74,7 @@ }   /// `HeaderValidator` is a protocol that validates headers and their relationships. #[auto_impl::auto_impl(&, Arc)] -pub trait HeaderValidator<H = Header>: Debug + Send + Sync { +pub trait HeaderValidator<H: BlockHeader = Header>: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. @@ -121,6 +122,22 @@ } } Ok(()) } + + /// Validate the block header against a provided expected state root. + fn validate_state_root(&self, header: &H, root: B256) -> Result<(), ConsensusError> { + validate_state_root(header, root) + } +} + +/// Validate the provided state root against the block's state root. +pub fn validate_state_root<H: BlockHeader>(header: &H, root: B256) -> Result<(), ConsensusError> { + if header.state_root() != root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: root, expected: header.state_root() }.into(), + )) + } + + Ok(()) }   /// Consensus Errors
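The new `validate_state_root` hook lets a consensus implementation decide how (or whether) a header's state root is compared against the post-execution root: the provided default calls the free `validate_state_root` function, which returns `ConsensusError::BodyStateRootDiff` on mismatch, while the noop and test implementations below simply accept any root. A hedged sketch of a custom validator overriding the hook, mirroring the `NoopConsensus` impl; the struct name is made up:

```rust
// Hedged sketch: a HeaderValidator that keeps the header checks trivial but overrides the
// new `validate_state_root` hook, e.g. for a chain whose state commitment is not the plain
// MPT root. The struct name is hypothetical; signatures follow the trait above.
use alloy_consensus::Header;
use alloy_primitives::B256;
use reth_consensus::{ConsensusError, HeaderValidator};
use reth_primitives_traits::SealedHeader;

#[derive(Debug)]
struct AcceptAnyStateRoot;

impl HeaderValidator<Header> for AcceptAnyStateRoot {
    fn validate_header(&self, _header: &SealedHeader<Header>) -> Result<(), ConsensusError> {
        Ok(())
    }

    fn validate_header_against_parent(
        &self,
        _header: &SealedHeader<Header>,
        _parent: &SealedHeader<Header>,
    ) -> Result<(), ConsensusError> {
        Ok(())
    }

    // The default compares `header.state_root()` with the given root; here any root passes.
    fn validate_state_root(&self, _header: &Header, _root: B256) -> Result<(), ConsensusError> {
        Ok(())
    }
}
```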
diff --git reth/crates/consensus/consensus/src/noop.rs scroll-reth/crates/consensus/consensus/src/noop.rs index 259fae27d677eb5d57a1da8030a88b5b0206ff9e..dd0f9ae1e30eccfb1287bd1e5883a91bd9a6c6e0 100644 --- reth/crates/consensus/consensus/src/noop.rs +++ scroll-reth/crates/consensus/consensus/src/noop.rs @@ -1,7 +1,10 @@ use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use alloc::sync::Arc; +use alloy_primitives::B256; use reth_execution_types::BlockExecutionResult; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{ + Block, BlockHeader, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, +};   /// A Consensus implementation that does nothing. #[derive(Debug, Copy, Clone, Default)] @@ -15,7 +18,7 @@ Arc::new(Self::default()) } }   -impl<H> HeaderValidator<H> for NoopConsensus { +impl<H: BlockHeader> HeaderValidator<H> for NoopConsensus { fn validate_header(&self, _header: &SealedHeader<H>) -> Result<(), ConsensusError> { Ok(()) } @@ -25,6 +28,10 @@ &self, _header: &SealedHeader<H>, _parent: &SealedHeader<H>, ) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_state_root(&self, _header: &H, _root: B256) -> Result<(), ConsensusError> { Ok(()) } }
diff --git reth/crates/consensus/consensus/src/test_utils.rs scroll-reth/crates/consensus/consensus/src/test_utils.rs index ad881cc9a7fb0c6b32c9543ec2e72f068b9b034a..e36ab0141761a4ef5e6a335c2710830580bea5eb 100644 --- reth/crates/consensus/consensus/src/test_utils.rs +++ scroll-reth/crates/consensus/consensus/src/test_utils.rs @@ -1,7 +1,10 @@ use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; +use alloy_primitives::B256; use core::sync::atomic::{AtomicBool, Ordering}; use reth_execution_types::BlockExecutionResult; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{ + Block, BlockHeader, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, +};   /// Consensus engine implementation for testing #[derive(Debug)] @@ -84,7 +87,7 @@ } } }   -impl<H> HeaderValidator<H> for TestConsensus { +impl<H: BlockHeader> HeaderValidator<H> for TestConsensus { fn validate_header(&self, _header: &SealedHeader<H>) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -103,5 +106,9 @@ Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } + } + + fn validate_state_root(&self, _header: &H, _root: B256) -> Result<(), ConsensusError> { + Ok(()) } }
diff --git reth/crates/e2e-test-utils/src/lib.rs scroll-reth/crates/e2e-test-utils/src/lib.rs index a51b78ae65441a425d5dccca01671894a8bc1edf..89d07b190238c4e0b5cda2e501c011d1d2a405bd 100644 --- reth/crates/e2e-test-utils/src/lib.rs +++ scroll-reth/crates/e2e-test-utils/src/lib.rs @@ -5,9 +5,10 @@ use reth_chainspec::{ChainSpec, EthChainSpec}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::NodeAddOns; use reth_node_builder::{ components::NodeComponentsBuilder, - rpc::{EngineValidatorAddOn, RethRpcAddOns}, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandleProvider}, EngineNodeLauncher, FullNodeTypesAdapter, Node, NodeAdapter, NodeBuilder, NodeComponents, NodeConfig, NodeHandle, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, @@ -61,6 +62,7 @@ >, N::AddOns: RethRpcAddOns<Adapter<N>> + EngineValidatorAddOn<Adapter<N>>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder<<<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes>, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -125,6 +127,7 @@ where N: NodeBuilderHelper, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder<<N::Payload as PayloadTypes>::PayloadAttributes>, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { setup_engine_with_connection::<N>( num_nodes, @@ -154,6 +157,7 @@ where N: NodeBuilderHelper, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder<<N::Payload as PayloadTypes>::PayloadAttributes>, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -237,6 +241,14 @@ TmpNodeAdapter<N, Provider>, >>::Components, >;   +/// Type alias for a `NodeHandle` for a `TmpNodeAdapter`. +pub type TmpNodeAddOnsHandle<N> = + <<N as Node<TmpNodeAdapter<N>>>::AddOns as NodeAddOns<Adapter<N>>>::Handle; + +/// Type alias for the `EthApi` for a `TmpNodeAdapter`. +pub type TmpNodeEthApi<N> = + <<N as Node<TmpNodeAdapter<N>>>::AddOns as RethRpcAddOns<Adapter<N>>>::EthApi; + /// Type alias for a type of `NodeHelper` pub type NodeHelperType<N, Provider = BlockchainProvider<NodeTypesWithDBAdapter<N, TmpDB>>> = NodeTestContext<Adapter<N, Provider>, <N as Node<TmpNodeAdapter<N, Provider>>>::AddOns>; @@ -273,6 +285,7 @@ ChainSpec: From<ChainSpec> + Clone, >, LocalPayloadAttributesBuilder<Self::ChainSpec>: PayloadAttributesBuilder<<Self::Payload as PayloadTypes>::PayloadAttributes>, + TmpNodeAddOnsHandle<Self>: RpcHandleProvider<Adapter<Self>, TmpNodeEthApi<Self>>, { }   @@ -307,5 +320,6 @@ ChainSpec: From<ChainSpec> + Clone, >, LocalPayloadAttributesBuilder<Self::ChainSpec>: PayloadAttributesBuilder<<Self::Payload as PayloadTypes>::PayloadAttributes>, + TmpNodeAddOnsHandle<Self>: RpcHandleProvider<Adapter<Self>, TmpNodeEthApi<Self>>, { }
diff --git reth/crates/e2e-test-utils/src/node.rs scroll-reth/crates/e2e-test-utils/src/node.rs index 4dd1ae63e1aa469194d6df20b6e670f2c46e4614..e000b870c6bed587923e875a7d5d1ed78e6b1dbc 100644 --- reth/crates/e2e-test-utils/src/node.rs +++ scroll-reth/crates/e2e-test-utils/src/node.rs @@ -13,8 +13,10 @@ use reth_node_api::{ Block, BlockBody, BlockTy, EngineApiMessageVersion, FullNodeComponents, PayloadTypes, PrimitivesTy, }; -use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeTypes}; - +use reth_node_builder::{ + rpc::{RethRpcAddOns, RpcHandleProvider}, + FullNode, NodeTypes, +}; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_provider::{ BlockReader, BlockReaderIdExt, CanonStateNotificationStream, CanonStateSubscriptions, @@ -33,6 +35,7 @@ pub struct NodeTestContext<Node, AddOns> where Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>, + AddOns::Handle: RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { /// The core structure representing the full node. pub inner: FullNode<Node, AddOns>, @@ -53,6 +56,7 @@ Node: FullNodeComponents, Node::Types: NodeTypes<ChainSpec: EthereumHardforks, Payload = Payload>, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns<Node>, + AddOns::Handle: RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { /// Creates a new test node pub async fn new( @@ -67,7 +71,7 @@ attributes_generator, ) .await?, network: NetworkTestContext::new(node.network.clone()), - rpc: RpcTestContext { inner: node.add_ons_handle.rpc_registry }, + rpc: RpcTestContext { inner: node.add_ons_handle.rpc_handle().rpc_registry.clone() }, canonical_stream: node.provider.canonical_state_stream(), }) } @@ -256,6 +260,7 @@ /// Sends a forkchoice update message to the engine. pub async fn update_forkchoice(&self, current_head: B256, new_head: B256) -> eyre::Result<()> { self.inner .add_ons_handle + .rpc_handle() .beacon_engine_handle .fork_choice_updated( ForkchoiceState { @@ -281,6 +286,7 @@ pub async fn submit_payload(&self, payload: Payload::BuiltPayload) -> eyre::Result<B256> { let block_hash = payload.block().hash(); self.inner .add_ons_handle + .rpc_handle() .beacon_engine_handle .new_payload(Payload::block_to_payload(payload.block().clone())) .await?; @@ -315,7 +321,7 @@ .rpc_client() .ok_or_else(|| eyre::eyre!("Failed to create HTTP RPC client for node"))?; let auth = self.auth_server_handle(); let url = self.rpc_url(); - let beacon_handle = self.inner.add_ons_handle.beacon_engine_handle.clone(); + let beacon_handle = self.inner.add_ons_handle.rpc_handle().beacon_engine_handle.clone();   Ok(crate::testsuite::NodeClient::new_with_beacon_engine(rpc, auth, url, beacon_handle)) }
diff --git reth/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs scroll-reth/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 9d2088c11a40a1cf5cc2e4145d945515f3955fc8..74a5e2ba1d516dcfd1b94fac762f2fed629490c9 100644 --- reth/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ scroll-reth/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -510,7 +510,7 @@ fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> { Box::pin(async move { let mut accepted_check: bool = false;   - let mut latest_block = env + let latest_block = env .current_block_info() .ok_or_else(|| eyre::eyre!("No latest block information available"))?;   @@ -603,10 +603,6 @@ env.active_node_state_mut()?.latest_header_time = rpc_latest_header.inner.timestamp; env.active_node_state_mut()?.latest_fork_choice_state.head_block_hash = rpc_latest_header.hash; - - // update local copy for any further usage in this scope - latest_block.hash = rpc_latest_header.hash; - latest_block.number = rpc_latest_header.inner.number; } }
diff --git reth/crates/e2e-test-utils/src/testsuite/mod.rs scroll-reth/crates/e2e-test-utils/src/testsuite/mod.rs index 79e906ef5926f567a3fb3fd7afcabed7d43a0ef0..a22e0d6fae336e4b2666e09bfed150fb628a4c04 100644 --- reth/crates/e2e-test-utils/src/testsuite/mod.rs +++ scroll-reth/crates/e2e-test-utils/src/testsuite/mod.rs @@ -2,7 +2,8 @@ //! Utilities for running e2e tests against a node or a network of nodes.   use crate::{ testsuite::actions::{Action, ActionBox}, - NodeBuilderHelper, PayloadAttributesBuilder, + Adapter, NodeBuilderHelper, PayloadAttributesBuilder, RpcHandleProvider, TmpNodeAddOnsHandle, + TmpNodeEthApi, }; use alloy_primitives::B256; use eyre::Result; @@ -352,6 +353,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { let mut setup = self.setup.take();
diff --git reth/crates/e2e-test-utils/src/testsuite/setup.rs scroll-reth/crates/e2e-test-utils/src/testsuite/setup.rs index 94f661753b5ac3355b38863ccda75f5912155677..bccda8bb267572e0e5b88efeecd1fea4964959de 100644 --- reth/crates/e2e-test-utils/src/testsuite/setup.rs +++ scroll-reth/crates/e2e-test-utils/src/testsuite/setup.rs @@ -1,8 +1,8 @@ //! Test setup utilities for configuring the initial state.   use crate::{ - setup_engine_with_connection, testsuite::Environment, NodeBuilderHelper, - PayloadAttributesBuilder, + setup_engine_with_connection, testsuite::Environment, Adapter, NodeBuilderHelper, + PayloadAttributesBuilder, RpcHandleProvider, TmpNodeAddOnsHandle, TmpNodeEthApi, }; use alloy_eips::BlockNumberOrTag; use alloy_primitives::B256; @@ -141,6 +141,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { // Note: this future is quite large so we box it Box::pin(self.apply_with_import_::<N>(env, rlp_path)).await @@ -157,6 +158,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { // Create nodes with imported chain data let import_result = self.create_nodes_with_import::<N>(rlp_path).await?; @@ -189,6 +191,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { // Note: this future is quite large so we box it Box::pin(self.apply_::<N>(env)).await @@ -201,6 +204,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { // If import_rlp_path is set, use apply_with_import instead if let Some(rlp_path) = self.import_rlp_path.take() { @@ -268,6 +272,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { let chain_spec = self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; @@ -304,6 +309,7 @@ N: NodeBuilderHelper<Payload = I>, LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<Adapter<N>, TmpNodeEthApi<N>>, { move |timestamp| { let attributes = PayloadAttributes {
diff --git reth/crates/e2e-test-utils/src/transaction.rs scroll-reth/crates/e2e-test-utils/src/transaction.rs index 54f984692423783cec0903ec2c26af5918e3da9b..3ee437ce376e58befef9bf7150ca9f14252fd62d 100644 --- reth/crates/e2e-test-utils/src/transaction.rs +++ scroll-reth/crates/e2e-test-utils/src/transaction.rs @@ -36,6 +36,17 @@ let signed = Self::transfer_tx(chain_id, wallet).await; signed.encoded_2718().into() }   + /// Creates a transfer with a nonce and signs it, returning bytes. + pub async fn transfer_tx_nonce_bytes( + chain_id: u64, + wallet: PrivateKeySigner, + nonce: u64, + ) -> Bytes { + let tx = tx(chain_id, 21000, None, None, nonce, Some(20e9 as u128)); + let signed = Self::sign_tx(wallet, tx).await; + signed.encoded_2718().into() + } + /// Creates a deployment transaction and signs it, returning an envelope. pub async fn deploy_tx( chain_id: u64,
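`transfer_tx_nonce_bytes` fills a gap for tests that need several raw transfers from one wallet in a single block. A usage sketch, assuming the helper lives on the crate's transaction test context (`TransactionTestContext` in upstream reth) and that `PrivateKeySigner` comes from `alloy_signer_local`:

```rust
use alloy_primitives::Bytes;
use alloy_signer_local::PrivateKeySigner;
use reth_e2e_test_utils::transaction::TransactionTestContext;

/// Hypothetical helper: two raw transfers from the same wallet with explicit,
/// consecutive nonces, ready to be submitted to the pool in order.
async fn two_raw_transfers(chain_id: u64, wallet: PrivateKeySigner) -> (Bytes, Bytes) {
    let first = TransactionTestContext::transfer_tx_nonce_bytes(chain_id, wallet.clone(), 0).await;
    let second = TransactionTestContext::transfer_tx_nonce_bytes(chain_id, wallet, 1).await;
    (first, second)
}
```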
diff --git reth/crates/engine/invalid-block-hooks/Cargo.toml scroll-reth/crates/engine/invalid-block-hooks/Cargo.toml index 8d4a469ee16d223b040bcff1bdc1e55afc068ca1..5b3563c7ac396dcf7effcf61bfb259d6e539ec80 100644 --- reth/crates/engine/invalid-block-hooks/Cargo.toml +++ scroll-reth/crates/engine/invalid-block-hooks/Cargo.toml @@ -12,6 +12,7 @@ workspace = true   [dependencies] # reth +revm.workspace = true revm-bytecode.workspace = true revm-database.workspace = true reth-engine-primitives.workspace = true @@ -38,3 +39,13 @@ jsonrpsee.workspace = true pretty_assertions.workspace = true serde.workspace = true serde_json.workspace = true + +[dev-dependencies] +alloy-eips.workspace = true +reth-chainspec.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true +tempfile.workspace = true
diff --git reth/crates/engine/invalid-block-hooks/src/witness.rs scroll-reth/crates/engine/invalid-block-hooks/src/witness.rs index f979958a1986e20477f6df60fa80529ebfcc73d3..d00f3b8287b0cc09e1e90005ba2aac10679c7c0e 100644 --- reth/crates/engine/invalid-block-hooks/src/witness.rs +++ scroll-reth/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,31 +1,50 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use pretty_assertions::Comparison; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::{BlockExecutionOutput, StateProviderFactory}; -use reth_revm::{database::StateProviderDatabase, db::BundleState, state::AccountInfo}; +use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderFactory}; +use reth_revm::{ + database::StateProviderDatabase, + db::{BundleState, State}, +}; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedStorage}; +use revm::state::AccountInfo; use revm_bytecode::Bytecode; -use revm_database::states::{ - reverts::{AccountInfoRevert, RevertToSlot}, - AccountStatus, StorageSlot, +use revm_database::{ + states::{reverts::AccountInfoRevert, StorageSlot}, + AccountStatus, RevertToSlot, }; use serde::Serialize; use std::{collections::BTreeMap, fmt::Debug, fs::File, io::Write, path::PathBuf};   +type CollectionResult = + (BTreeMap<B256, Bytes>, BTreeMap<B256, Bytes>, reth_trie::HashedPostState, BundleState); + +/// Serializable version of `BundleState` for deterministic comparison #[derive(Debug, PartialEq, Eq)] -struct AccountRevertSorted { - pub account: AccountInfoRevert, - pub storage: BTreeMap<U256, RevertToSlot>, - pub previous_status: AccountStatus, - pub wipe_storage: bool, +struct BundleStateSorted { + /// Account state + pub state: BTreeMap<Address, BundleAccountSorted>, + /// All created contracts in this block. + pub contracts: BTreeMap<B256, Bytecode>, + /// Changes to revert + /// + /// **Note**: Inside vector is *not* sorted by address. + /// + /// But it is unique by address. + pub reverts: Vec<Vec<(Address, AccountRevertSorted)>>, + /// The size of the plain state in the bundle state + pub state_size: usize, + /// The size of reverts in the bundle state + pub reverts_size: usize, }   +/// Serializable version of `BundleAccount` #[derive(Debug, PartialEq, Eq)] struct BundleAccountSorted { pub info: Option<AccountInfo>, @@ -40,74 +59,120 @@ /// Account status. pub status: AccountStatus, }   +/// Serializable version of `AccountRevert` #[derive(Debug, PartialEq, Eq)] -struct BundleStateSorted { - /// Account state - pub state: BTreeMap<Address, BundleAccountSorted>, - /// All created contracts in this block. - pub contracts: BTreeMap<B256, Bytecode>, - /// Changes to revert - /// - /// **Note**: Inside vector is *not* sorted by address. - /// - /// But it is unique by address. 
- pub reverts: Vec<Vec<(Address, AccountRevertSorted)>>, - /// The size of the plain state in the bundle state - pub state_size: usize, - /// The size of reverts in the bundle state - pub reverts_size: usize, +struct AccountRevertSorted { + pub account: AccountInfoRevert, + pub storage: BTreeMap<U256, RevertToSlot>, + pub previous_status: AccountStatus, + pub wipe_storage: bool, }   -impl BundleStateSorted { - fn from_bundle_state(bundle_state: &BundleState) -> Self { - let state = bundle_state +/// Converts bundle state to sorted format for deterministic comparison +fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSorted { + BundleStateSorted { + state: bundle_state .state - .clone() - .into_iter() - .map(|(address, account)| { + .iter() + .map(|(addr, acc)| { ( - address, + *addr, BundleAccountSorted { - info: account.info, - original_info: account.original_info, - status: account.status, - storage: BTreeMap::from_iter(account.storage), + info: acc.info.clone(), + original_info: acc.original_info.clone(), + storage: BTreeMap::from_iter(acc.storage.clone()), + status: acc.status, }, ) }) - .collect(); - - let contracts = BTreeMap::from_iter(bundle_state.contracts.clone()); - - let reverts = bundle_state + .collect(), + contracts: BTreeMap::from_iter(bundle_state.contracts.clone()), + reverts: bundle_state .reverts .iter() .map(|block| { block .iter() - .map(|(address, account_revert)| { + .map(|(addr, rev)| { ( - *address, + *addr, AccountRevertSorted { - account: account_revert.account.clone(), - previous_status: account_revert.previous_status, - wipe_storage: account_revert.wipe_storage, - storage: BTreeMap::from_iter(account_revert.storage.clone()), + account: rev.account.clone(), + storage: BTreeMap::from_iter(rev.storage.clone()), + previous_status: rev.previous_status, + wipe_storage: rev.wipe_storage, }, ) }) .collect() }) - .collect(); + .collect(), + state_size: bundle_state.state_size, + reverts_size: bundle_state.reverts_size, + } +}   - let state_size = bundle_state.state_size; - let reverts_size = bundle_state.reverts_size; +/// Extracts execution data including codes, preimages, and hashed state from database +fn collect_execution_data( + mut db: State<StateProviderDatabase<Box<dyn StateProvider>>>, +) -> eyre::Result<CollectionResult> { + let bundle_state = db.take_bundle(); + let mut codes = BTreeMap::new(); + let mut preimages = BTreeMap::new(); + let mut hashed_state = db.database.hashed_post_state(&bundle_state);   - Self { state, contracts, reverts, state_size, reverts_size } + // Collect codes + db.cache.contracts.values().chain(bundle_state.contracts.values()).for_each(|code| { + let code_bytes = code.original_bytes(); + codes.insert(keccak256(&code_bytes), code_bytes); + }); + + // Collect preimages + for (address, account) in db.cache.accounts { + let hashed_address = keccak256(address); + hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + + if let Some(account_data) = account.account { + preimages.insert(hashed_address, alloy_rlp::encode(address).into()); + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + for (slot, value) in account_data.storage { + let slot_bytes = B256::from(slot); + let hashed_slot = keccak256(slot_bytes); + storage.storage.insert(hashed_slot, value); + preimages.insert(hashed_slot, alloy_rlp::encode(slot_bytes).into()); + } + } } + + Ok((codes, preimages, 
hashed_state, bundle_state)) }   -/// Generates a witness for the given block and saves it to a file. +/// Generates execution witness from collected codes, preimages, and hashed state +fn generate( + codes: BTreeMap<B256, Bytes>, + preimages: BTreeMap<B256, Bytes>, + hashed_state: reth_trie::HashedPostState, + state_provider: Box<dyn StateProvider>, +) -> eyre::Result<ExecutionWitness> { + let state = state_provider.witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { + state, + codes: codes.into_values().collect(), + keys: preimages.into_values().collect(), + ..Default::default() + }) +} + +/// Hook for generating execution witnesses when invalid blocks are detected. +/// +/// This hook captures the execution state and generates witness data that can be used +/// for debugging and analysis of invalid block execution. #[derive(Debug)] pub struct InvalidBlockWitnessHook<P, E> { /// The provider to read the historical state and do the EVM execution. @@ -139,103 +204,51 @@ P: StateProviderFactory + Send + Sync + 'static, E: ConfigureEvm<Primitives = N> + 'static, N: NodePrimitives, { - fn on_invalid_block( + /// Re-executes the block and collects execution data + fn re_execute_block( &self, parent_header: &SealedHeader<N::BlockHeader>, block: &RecoveredBlock<N::Block>, - output: &BlockExecutionOutput<N::Receipt>, - trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { - // TODO(alexey): unify with `DebugApi::debug_execution_witness` - + ) -> eyre::Result<(ExecutionWitness, BundleState)> { let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new( self.provider.state_by_block_hash(parent_header.hash())?, ));   executor.execute_one(block)?; - - // Take the bundle state - let mut db = executor.into_state(); - let bundle_state = db.take_bundle(); - - // Initialize a map of preimages. - let mut state_preimages = Vec::default(); - - // Get codes - let codes = db - .cache - .contracts - .values() - .map(|code| code.original_bytes()) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - bundle_state.contracts.values().map(|code| code.original_bytes()), - ) - .collect(); + let db = executor.into_state(); + let (codes, preimages, hashed_state, bundle_state) = collect_execution_data(db)?;   - // Grab all account proofs for the data accessed during block execution. - // - // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes - // referenced accounts + storage slots. 
- let mut hashed_state = db.database.hashed_post_state(&bundle_state); - for (address, account) in db.cache.accounts { - let hashed_address = keccak256(address); - hashed_state - .accounts - .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let witness = generate(codes, preimages, hashed_state, state_provider)?;   - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + Ok((witness, bundle_state)) + }   - if let Some(account) = account.account { - state_preimages.push(alloy_rlp::encode(address).into()); - - for (slot, value) in account.storage { - let slot = B256::from(slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, value); - - state_preimages.push(alloy_rlp::encode(slot).into()); - } - } - } - - // Generate an execution witness for the aggregated state of accessed accounts. - // Destruct the cache database to retrieve the state provider. - let state_provider = db.database.into_inner(); - let state = state_provider.witness(Default::default(), hashed_state.clone())?; + /// Handles witness generation, saving, and comparison with healthy node + fn handle_witness_operations( + &self, + witness: &ExecutionWitness, + block_prefix: &str, + block_number: u64, + ) -> eyre::Result<()> { + let filename = format!("{}.witness.re_executed.json", block_prefix); + let re_executed_witness_path = self.save_file(filename, witness)?;   - // Write the witness to the output directory. - let response = - ExecutionWitness { state, codes, keys: state_preimages, ..Default::default() }; - let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), - &response, - )?; if let Some(healthy_node_client) = &self.healthy_node_client { - // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { DebugApiClient::<()>::debug_execution_witness( healthy_node_client, - block.number().into(), + block_number.into(), ) .await })?;   - let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number(), block.hash()), - &healthy_node_witness, - )?; + let filename = format!("{}.witness.healthy.json", block_prefix); + let healthy_path = self.save_file(filename, &healthy_node_witness)?;   - // If the witnesses are different, write the diff to the output directory. - if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); - let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; + if witness != &healthy_node_witness { + let filename = format!("{}.witness.diff", block_prefix); + let diff_path = self.save_diff(filename, witness, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", diff_path = %diff_path.display(), @@ -245,29 +258,26 @@ "Witness mismatch against healthy node" ); } } + Ok(()) + }   - // The bundle state after re-execution should match the original one. - // - // Reverts now supports order-independent equality, so we can compare directly without - // sorting the reverts vectors. 
- // - // See: https://github.com/bluealloy/revm/pull/1827 - if bundle_state != output.state { - let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), - &output.state, - )?; - let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), - &bundle_state, - )?; + /// Validates that the bundle state after re-execution matches the original + fn validate_bundle_state( + &self, + re_executed_state: &BundleState, + original_state: &BundleState, + block_prefix: &str, + ) -> eyre::Result<()> { + if re_executed_state != original_state { + let original_filename = format!("{}.bundle_state.original.json", block_prefix); + let original_path = self.save_file(original_filename, original_state)?; + let re_executed_filename = format!("{}.bundle_state.re_executed.json", block_prefix); + let re_executed_path = self.save_file(re_executed_filename, re_executed_state)?;   - let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); - // Convert bundle state to sorted struct which has BTreeMap instead of HashMap to - // have deterministic ordering - let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state); - let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state); - + // Convert bundle state to sorted format for deterministic comparison + let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state); + let output_state_sorted = sort_bundle_state_for_comparison(original_state); + let filename = format!("{}.bundle_state.diff", block_prefix); let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?;   warn!( @@ -278,37 +288,44 @@ re_executed_path = %re_executed_path.display(), "Bundle state mismatch after re-execution" ); } + Ok(()) + }   - // Calculate the state root and trie updates after re-execution. They should match - // the original ones. + /// Validates state root and trie updates after re-execution + fn validate_state_root_and_trie( + &self, + parent_header: &SealedHeader<N::BlockHeader>, + block: &RecoveredBlock<N::Block>, + bundle_state: &BundleState, + trie_updates: Option<(&TrieUpdates, B256)>, + block_prefix: &str, + ) -> eyre::Result<()> { + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let hashed_state = state_provider.hashed_post_state(bundle_state); let (re_executed_root, trie_output) = state_provider.state_root_with_updates(hashed_state)?; + if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); + let filename = format!("{}.state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); }   - // If the re-executed state root does not match the _header_ state root, also log that. 
if re_executed_root != block.state_root() { - let filename = - format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let filename = format!("{}.header_state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); }   if &trie_output != original_updates { - // Trie updates are too big to diff, so we just save the original and re-executed - let trie_output_sorted = &trie_output.into_sorted_ref(); - let original_updates_sorted = &original_updates.into_sorted_ref(); let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), - original_updates_sorted, + format!("{}.trie_updates.original.json", block_prefix), + &original_updates.into_sorted_ref(), )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), - trie_output_sorted, + format!("{}.trie_updates.re_executed.json", block_prefix), + &trie_output.into_sorted_ref(), )?; warn!( target: "engine::invalid_block_hooks::witness", @@ -318,11 +335,44 @@ "Trie updates mismatch after re-execution" ); } } + Ok(()) + } + + fn on_invalid_block( + &self, + parent_header: &SealedHeader<N::BlockHeader>, + block: &RecoveredBlock<N::Block>, + output: &BlockExecutionOutput<N::Receipt>, + trie_updates: Option<(&TrieUpdates, B256)>, + ) -> eyre::Result<()> { + // TODO(alexey): unify with `DebugApi::debug_execution_witness` + let (witness, bundle_state) = self.re_execute_block(parent_header, block)?; + + let block_prefix = format!("{}_{}", block.number(), block.hash()); + self.handle_witness_operations(&witness, &block_prefix, block.number())?; + + self.validate_bundle_state(&bundle_state, &output.state, &block_prefix)?; + + self.validate_state_root_and_trie( + parent_header, + block, + &bundle_state, + trie_updates, + &block_prefix, + )?;   Ok(()) }   - /// Saves the diff of two values into a file with the given name in the output directory. 
+ /// Serializes and saves a value to a JSON file in the output directory + fn save_file<T: Serialize>(&self, filename: String, value: &T) -> eyre::Result<PathBuf> { + let path = self.output_directory.join(filename); + File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; + + Ok(path) + } + + /// Compares two values and saves their diff to a file in the output directory fn save_diff<T: PartialEq + Debug>( &self, filename: String, @@ -332,13 +382,6 @@ ) -> eyre::Result<PathBuf> { let path = self.output_directory.join(filename); let diff = Comparison::new(original, new); File::create(&path)?.write_all(diff.to_string().as_bytes())?; - - Ok(path) - } - - fn save_file<T: Serialize>(&self, filename: String, value: &T) -> eyre::Result<PathBuf> { - let path = self.output_directory.join(filename); - File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?;   Ok(path) } @@ -361,3 +404,655 @@ warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook"); } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use alloy_primitives::{map::HashMap, Address, Bytes, B256, U256}; + use reth_chainspec::ChainSpec; + use reth_ethereum_primitives::EthPrimitives; + use reth_evm_ethereum::EthEvmConfig; + use reth_provider::test_utils::MockEthProvider; + use reth_revm::db::{BundleAccount, BundleState}; + use revm_database::states::reverts::AccountRevert; + use tempfile::TempDir; + + use reth_revm::test_utils::StateProviderTest; + use reth_testing_utils::generators::{self, random_block, random_eoa_accounts, BlockParams}; + use revm_bytecode::Bytecode; + + /// Creates a test `BundleState` with realistic accounts, contracts, and reverts + fn create_bundle_state() -> BundleState { + let mut rng = generators::rng(); + let mut bundle_state = BundleState::default(); + + // Generate realistic EOA accounts using generators + let accounts = random_eoa_accounts(&mut rng, 3); + + for (i, (addr, account)) in accounts.into_iter().enumerate() { + // Create storage entries for each account + let mut storage = HashMap::default(); + let storage_key = U256::from(i + 1); + storage.insert( + storage_key, + StorageSlot { + present_value: U256::from((i + 1) * 10), + previous_or_original_value: U256::from((i + 1) * 15), + }, + ); + + let bundle_account = BundleAccount { + info: Some(AccountInfo { + balance: account.balance, + nonce: account.nonce, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + original_info: (i == 0).then(|| AccountInfo { + balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), + nonce: 0, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + storage, + status: AccountStatus::default(), + }; + + bundle_state.state.insert(addr, bundle_account); + } + + // Generate realistic contract bytecode using generators + let contract_hashes: Vec<B256> = (0..3).map(|_| B256::random()).collect(); + for (i, hash) in contract_hashes.iter().enumerate() { + let bytecode = match i { + 0 => Bytes::from(vec![0x60, 0x80, 0x60, 0x40, 0x52]), // Simple contract + 1 => Bytes::from(vec![0x61, 0x81, 0x60, 0x00, 0x39]), // Another contract + _ => Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd]), // REVERT contract + }; + bundle_state.contracts.insert(*hash, Bytecode::new_raw(bytecode)); + } + + // Add reverts for multiple blocks using different accounts + let addresses: Vec<Address> = bundle_state.state.keys().copied().collect(); + for (i, addr) in 
addresses.iter().take(2).enumerate() { + let revert = AccountRevert { + wipe_storage: i == 0, // First account has storage wiped + ..AccountRevert::default() + }; + bundle_state.reverts.push(vec![(*addr, revert)]); + } + + // Set realistic sizes + bundle_state.state_size = bundle_state.state.len(); + bundle_state.reverts_size = bundle_state.reverts.len(); + + bundle_state + } + #[test] + fn test_sort_bundle_state_for_comparison() { + // Use the fixture function to create test data + let bundle_state = create_bundle_state(); + + // Call the function under test + let sorted = sort_bundle_state_for_comparison(&bundle_state); + + // Verify state_size and reverts_size values match the fixture + assert_eq!(sorted.state_size, 3); + assert_eq!(sorted.reverts_size, 2); + + // Verify state contains our mock accounts + assert_eq!(sorted.state.len(), 3); // We added 3 accounts + + // Verify contracts contains our mock contracts + assert_eq!(sorted.contracts.len(), 3); // We added 3 contracts + + // Verify reverts is an array with multiple blocks of reverts + let reverts = &sorted.reverts; + assert_eq!(reverts.len(), 2); // Fixture has two blocks of reverts + + // Verify that the state accounts have the expected structure + for account_data in sorted.state.values() { + // BundleAccountSorted has info, original_info, storage, and status fields + // Just verify the structure exists by accessing the fields + let _info = &account_data.info; + let _original_info = &account_data.original_info; + let _storage = &account_data.storage; + let _status = &account_data.status; + } + } + + #[test] + fn test_data_collector_collect() { + // Create test data using the fixture function + let bundle_state = create_bundle_state(); + + // Create a State with StateProviderTest + let state_provider = StateProviderTest::default(); + let mut state = State::builder() + .with_database(StateProviderDatabase::new( + Box::new(state_provider) as Box<dyn StateProvider> + )) + .with_bundle_update() + .build(); + + // Insert contracts from the fixture into the state cache + for (code_hash, bytecode) in &bundle_state.contracts { + state.cache.contracts.insert(*code_hash, bytecode.clone()); + } + + // Manually set the bundle state in the state object + state.bundle_state = bundle_state; + + // Call the collect function + let result = collect_execution_data(state); + // Verify the function returns successfully + assert!(result.is_ok()); + + let (codes, _preimages, _hashed_state, returned_bundle_state) = result.unwrap(); + + // Verify that the returned data contains expected values + // Since we used the fixture data, we should have some codes and state + assert!(!codes.is_empty(), "Expected some bytecode entries"); + assert!(!returned_bundle_state.state.is_empty(), "Expected some state entries"); + + // Verify the bundle state structure matches our fixture + assert_eq!(returned_bundle_state.state.len(), 3, "Expected 3 accounts from fixture"); + assert_eq!(returned_bundle_state.contracts.len(), 3, "Expected 3 contracts from fixture"); + } + + #[test] + fn test_re_execute_block() { + // Create hook instance + let (hook, _output_directory, _temp_dir) = create_test_hook(); + + // Setup to call re_execute_block + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + + // Create a random block that inherits from the parent header + let recovered_block = random_block( + &mut rng, + 2, // block number + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() 
+ }, + ) + .try_recover() + .unwrap(); + + let result = hook.re_execute_block(&parent_header, &recovered_block); + + // Verify the function behavior with mock data + assert!(result.is_ok(), "re_execute_block should return Ok"); + } + + /// Creates test `InvalidBlockWitnessHook` with temporary directory + fn create_test_hook() -> ( + InvalidBlockWitnessHook<MockEthProvider<EthPrimitives, ChainSpec>, EthEvmConfig>, + PathBuf, + TempDir, + ) { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let output_directory = temp_dir.path().to_path_buf(); + + let provider = MockEthProvider::<EthPrimitives, ChainSpec>::default(); + let evm_config = EthEvmConfig::mainnet(); + + let hook = + InvalidBlockWitnessHook::new(provider, evm_config, output_directory.clone(), None); + + (hook, output_directory, temp_dir) + } + + #[test] + fn test_handle_witness_operations_with_healthy_client_mock() { + // Create hook instance with mock healthy client + let (hook, output_directory, _temp_dir) = create_test_hook(); + + // Create sample ExecutionWitness with correct types + let witness = ExecutionWitness { + state: vec![Bytes::from("state_data")], + codes: vec![Bytes::from("code_data")], + keys: vec![Bytes::from("key_data")], + ..Default::default() + }; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, "test_block_healthy", 67890); + + // Should succeed + assert!(result.is_ok()); + + // Check that witness file was created + let witness_file = output_directory.join("test_block_healthy.witness.re_executed.json"); + assert!(witness_file.exists()); + } + + #[test] + fn test_handle_witness_operations_file_creation() { + // Test file creation and content validation + let (hook, output_directory, _temp_dir) = create_test_hook(); + + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + + let block_prefix = "file_test_block"; + let block_number = 11111; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + + // Verify file was created with correct name + let expected_file = + output_directory.join(format!("{}.witness.re_executed.json", block_prefix)); + assert!(expected_file.exists()); + + // Read and verify file content is valid JSON and contains witness structure + let file_content = std::fs::read_to_string(&expected_file).expect("Failed to read file"); + let parsed_witness: serde_json::Value = + serde_json::from_str(&file_content).expect("File should contain valid JSON"); + + // Verify the JSON structure contains expected fields + assert!(parsed_witness.get("state").is_some(), "JSON should contain 'state' field"); + assert!(parsed_witness.get("codes").is_some(), "JSON should contain 'codes' field"); + assert!(parsed_witness.get("keys").is_some(), "JSON should contain 'keys' field"); + } + + #[test] + fn test_proof_generator_generate() { + // Use existing MockEthProvider + let mock_provider = MockEthProvider::default(); + let state_provider: Box<dyn StateProvider> = Box::new(mock_provider); + + // Mock Data + let mut codes = BTreeMap::new(); + codes.insert(B256::from([1u8; 32]), Bytes::from("contract_code_1")); + codes.insert(B256::from([2u8; 32]), Bytes::from("contract_code_2")); + + let mut preimages = BTreeMap::new(); + preimages.insert(B256::from([3u8; 32]), Bytes::from("preimage_1")); + preimages.insert(B256::from([4u8; 32]), 
Bytes::from("preimage_2")); + + let hashed_state = reth_trie::HashedPostState::default(); + + // Call generate function + let result = generate(codes.clone(), preimages.clone(), hashed_state, state_provider); + + // Verify result + assert!(result.is_ok(), "generate function should succeed"); + let execution_witness = result.unwrap(); + + assert!(execution_witness.state.is_empty(), "State should be empty from MockEthProvider"); + + let expected_codes: Vec<Bytes> = codes.into_values().collect(); + assert_eq!( + execution_witness.codes.len(), + expected_codes.len(), + "Codes length should match" + ); + for code in &expected_codes { + assert!( + execution_witness.codes.contains(code), + "Codes should contain expected bytecode" + ); + } + + let expected_keys: Vec<Bytes> = preimages.into_values().collect(); + assert_eq!(execution_witness.keys.len(), expected_keys.len(), "Keys length should match"); + for key in &expected_keys { + assert!(execution_witness.keys.contains(key), "Keys should contain expected preimage"); + } + } + + #[test] + fn test_validate_bundle_state_matching() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + let block_prefix = "test_block_123"; + + // Test with identical states - should not produce any warnings or files + let result = hook.validate_bundle_state(&bundle_state, &bundle_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_mismatch() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let original_state = create_bundle_state(); + let mut modified_state = create_bundle_state(); + + // Modify the state to create a mismatch + let addr = Address::from([1u8; 20]); + if let Some(account) = modified_state.state.get_mut(&addr) && + let Some(ref mut info) = account.info + { + info.balance = U256::from(999); + } + + let block_prefix = "test_block_mismatch"; + + // Test with different states - should save files and log warning + let result = hook.validate_bundle_state(&modified_state, &original_state, block_prefix); + assert!(result.is_ok()); + + // Verify that files were created + let original_file = output_dir.join(format!("{}.bundle_state.original.json", block_prefix)); + let re_executed_file = + output_dir.join(format!("{}.bundle_state.re_executed.json", block_prefix)); + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + + assert!(original_file.exists(), "Original bundle state file should be created"); + assert!(re_executed_file.exists(), "Re-executed bundle state file should be created"); + assert!(diff_file.exists(), "Diff file should be created"); + } + + /// Creates test `TrieUpdates` with account nodes and removed nodes + fn create_test_trie_updates() -> TrieUpdates { + use alloy_primitives::map::HashMap; + use reth_trie::{updates::TrieUpdates, BranchNodeCompact, Nibbles}; + use std::collections::HashSet; + + let mut account_nodes = HashMap::default(); + let nibbles = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]); + let branch_node = BranchNodeCompact::new( + 0b1010, // state_mask + 0b1010, // tree_mask - must be subset of state_mask + 0b1000, // hash_mask + vec![B256::from([1u8; 32])], // hashes + None, // root_hash + ); + account_nodes.insert(nibbles, branch_node); + + let mut removed_nodes = HashSet::default(); + removed_nodes.insert(Nibbles::from_nibbles_unchecked([0x4, 0x5, 0x6])); + + TrieUpdates { account_nodes, removed_nodes, storage_tries: HashMap::default() } + } + + #[test] + fn 
test_validate_state_root_and_trie_with_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let trie_updates = create_test_trie_updates(); + let original_root = B256::from([2u8; 32]); // Different from what will be computed + let block_prefix = "test_state_root_with_trie"; + + // Test with trie updates - this will likely produce warnings due to mock data + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + Some((&trie_updates, original_root)), + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_on_invalid_block_calls_all_validation_methods() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + // Create mock BlockExecutionOutput + let output = BlockExecutionOutput { + state: bundle_state, + result: reth_provider::BlockExecutionResult { + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + blob_gas_used: 0, + }, + }; + + // Create test trie updates + let trie_updates = create_test_trie_updates(); + let state_root = B256::random(); + + // Test that on_invalid_block attempts to call all its internal methods + // by checking that it doesn't panic and tries to create files + let files_before = output_dir.read_dir().unwrap().count(); + + let _result = hook.on_invalid_block( + &parent_header, + &recovered_block, + &output, + Some((&trie_updates, state_root)), + ); + + // Verify that the function attempted to process the block: + // Either it succeeded, or it created some output files during processing + let files_after = output_dir.read_dir().unwrap().count(); + + // The function should attempt to execute its workflow + assert!( + files_after >= files_before, + "on_invalid_block should attempt to create output files during processing" + ); + } + + #[test] + fn test_handle_witness_operations_with_empty_witness() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness::default(); + let block_prefix = "empty_witness_test"; + let block_number = 12345; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_zero_block_number() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + let block_prefix = "zero_block_test"; + let block_number = 0; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_large_witness_data() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let large_data = vec![0u8; 
10000]; // 10KB of data + let witness = ExecutionWitness { + state: vec![Bytes::from(large_data.clone())], + codes: vec![Bytes::from(large_data.clone())], + keys: vec![Bytes::from(large_data)], + ..Default::default() + }; + let block_prefix = "large_witness_test"; + let block_number = 999999; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_empty_states() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let empty_state = BundleState::default(); + let block_prefix = "empty_states_test"; + + let result = hook.validate_bundle_state(&empty_state, &empty_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_different_contract_counts() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let state1 = create_bundle_state(); + let mut state2 = create_bundle_state(); + + // Add extra contract to state2 + let extra_contract_hash = B256::random(); + state2.contracts.insert( + extra_contract_hash, + Bytecode::new_raw(Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd])), // REVERT opcode + ); + + let block_prefix = "different_contracts_test"; + let result = hook.validate_bundle_state(&state1, &state2, block_prefix); + assert!(result.is_ok()); + + // Verify diff files were created + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + assert!(diff_file.exists()); + } + + #[test] + fn test_save_diff_with_identical_values() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let value1 = "identical_value"; + let value2 = "identical_value"; + let filename = "identical_diff_test".to_string(); + + let result = hook.save_diff(filename.clone(), &value1, &value2); + assert!(result.is_ok()); + + let diff_file = output_dir.join(filename); + assert!(diff_file.exists()); + } + + #[test] + fn test_validate_state_root_and_trie_without_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let block_prefix = "no_trie_updates_test"; + + // Test without trie updates (None case) + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + None, + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_complete_invalid_block_workflow() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create a realistic block scenario + let parent_header = generators::random_header(&mut rng, 100, None); + let invalid_block = random_block( + &mut rng, + 101, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(3), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let trie_updates = create_test_trie_updates(); + + // Test validation methods + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_test"); + assert!(validation_result.is_ok(), "Bundle state validation should succeed"); + + let state_root_result = hook.validate_state_root_and_trie( + &parent_header, + &invalid_block, + &bundle_state, + Some((&trie_updates, 
B256::random())), + "integration_test", + ); + assert!(state_root_result.is_ok(), "State root validation should succeed"); + } + + #[test] + fn test_integration_workflow_components() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create test data + let parent_header = generators::random_header(&mut rng, 50, None); + let _invalid_block = random_block( + &mut rng, + 51, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(2), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let _trie_updates = create_test_trie_updates(); + + // Test individual components that would be part of the complete flow + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_component_test"); + assert!(validation_result.is_ok(), "Component validation should succeed"); + } +}
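Functionally the hook is unchanged: `on_invalid_block` still re-executes the block, writes the witness, and compares bundle state, state root, and trie updates; the refactor splits those steps into separately testable helpers. Constructing the hook mirrors `create_test_hook` above; the sketch below reuses the same mock provider and Ethereum EVM config, with `None` for the optional healthy-node client (crate and import paths are assumed to match upstream naming).

```rust
use reth_chainspec::ChainSpec;
use reth_ethereum_primitives::EthPrimitives;
use reth_evm_ethereum::EthEvmConfig;
use reth_invalid_block_hooks::InvalidBlockWitnessHook;
use reth_provider::test_utils::MockEthProvider;

/// Hypothetical constructor wrapper: same arguments as the test fixture above,
/// i.e. provider, EVM config, output directory, and no healthy-node client.
fn build_witness_hook(
    output_directory: std::path::PathBuf,
) -> InvalidBlockWitnessHook<MockEthProvider<EthPrimitives, ChainSpec>, EthEvmConfig> {
    InvalidBlockWitnessHook::new(
        MockEthProvider::<EthPrimitives, ChainSpec>::default(),
        EthEvmConfig::mainnet(),
        output_directory,
        None,
    )
}
```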
diff --git reth/crates/engine/local/Cargo.toml scroll-reth/crates/engine/local/Cargo.toml index 98793a24b21d53d32f3545147b881ad5a9f4e21d..2de5ec3c8826b0c41324a0f2ae497ab05fd76cd4 100644 --- reth/crates/engine/local/Cargo.toml +++ scroll-reth/crates/engine/local/Cargo.toml @@ -23,6 +23,9 @@ alloy-consensus.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-rpc-types-engine.workspace = true   +# scroll +scroll-alloy-rpc-types-engine = { workspace = true, optional = true } + # async tokio.workspace = true tokio-stream.workspace = true @@ -44,3 +47,7 @@ "dep:op-alloy-rpc-types-engine", "dep:reth-optimism-chainspec", "reth-payload-primitives/op", ] +scroll-alloy-traits = [ + "dep:scroll-alloy-rpc-types-engine", + "reth-payload-primitives/scroll-alloy-traits", +]
diff --git reth/crates/engine/local/src/payload.rs scroll-reth/crates/engine/local/src/payload.rs index 34deaf3e10ceb8465aa37305733bd3198e37ae5d..79ba73303a842c83ba3330eb74a543c4e0e49c63 100644 --- reth/crates/engine/local/src/payload.rs +++ scroll-reth/crates/engine/local/src/payload.rs @@ -65,3 +65,20 @@ min_base_fee: None, } } } + +#[cfg(feature = "scroll-alloy-traits")] +impl<ChainSpec> PayloadAttributesBuilder<scroll_alloy_rpc_types_engine::ScrollPayloadAttributes> + for LocalPayloadAttributesBuilder<ChainSpec> +where + ChainSpec: Send + Sync + EthereumHardforks + 'static, +{ + fn build(&self, timestamp: u64) -> scroll_alloy_rpc_types_engine::ScrollPayloadAttributes { + scroll_alloy_rpc_types_engine::ScrollPayloadAttributes { + payload_attributes: self.build(timestamp), + transactions: None, + no_tx_pool: false, + block_data_hint: scroll_alloy_rpc_types_engine::BlockDataHint::none(), + gas_limit: None, + } + } +}
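With the `scroll-alloy-traits` feature enabled, `LocalPayloadAttributesBuilder` can now drive a Scroll payload job: the Ethereum attributes are built first and wrapped with Scroll-specific defaults (no forced transactions, tx pool enabled, no block-data hint or gas-limit override). A sketch of how code generic over the builder trait can consume this; the trait is assumed to live in `reth_payload_primitives`, as upstream.

```rust
use reth_payload_primitives::PayloadAttributesBuilder;
use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes;

/// Hypothetical helper: works with any attributes builder that targets Scroll
/// payload attributes, including `LocalPayloadAttributesBuilder` once the
/// feature above is enabled.
fn next_scroll_attributes<B>(builder: &B, timestamp: u64) -> ScrollPayloadAttributes
where
    B: PayloadAttributesBuilder<ScrollPayloadAttributes>,
{
    builder.build(timestamp)
}
```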
diff --git reth/crates/engine/primitives/src/config.rs scroll-reth/crates/engine/primitives/src/config.rs index e5f58523d0343130543d7c5d6496dbae0e7055df..9e2c8210f080e3ac6a9be92fa46966dbc870bb92 100644 --- reth/crates/engine/primitives/src/config.rs +++ scroll-reth/crates/engine/primitives/src/config.rs @@ -6,8 +6,31 @@ /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0;   -/// Default maximum concurrency for proof tasks +/// Default maximum concurrency for on-demand proof tasks (blinded nodes) pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256; + +/// Minimum number of workers we allow configuring explicitly. +pub const MIN_WORKER_COUNT: usize = 32; + +/// Returns the default number of storage worker threads based on available parallelism. +fn default_storage_worker_count() -> usize { + #[cfg(feature = "std")] + { + std::thread::available_parallelism().map_or(8, |n| n.get() * 2).min(MIN_WORKER_COUNT) + } + #[cfg(not(feature = "std"))] + { + 8 + } +} + +/// Returns the default number of account worker threads. +/// +/// Account workers coordinate storage proof collection and account trie traversal. +/// They are set to the same count as storage workers for simplicity. +fn default_account_worker_count() -> usize { + default_storage_worker_count() +}   /// The size of proof targets chunk to spawn in one multiproof calculation. pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10; @@ -109,6 +132,10 @@ /// Maximum concurrency for the prewarm task. prewarm_max_concurrency: usize, /// Whether to unwind canonical header to ancestor during forkchoice updates. allow_unwind_canonical_header: bool, + /// Number of storage proof worker threads. + storage_worker_count: usize, + /// Number of account proof worker threads. + account_worker_count: usize, }   impl Default for TreeConfig { @@ -135,6 +162,8 @@ state_root_fallback: false, always_process_payload_attributes_on_canonical_head: false, prewarm_max_concurrency: DEFAULT_PREWARM_MAX_CONCURRENCY, allow_unwind_canonical_header: false, + storage_worker_count: default_storage_worker_count(), + account_worker_count: default_account_worker_count(), } } } @@ -164,7 +193,10 @@ state_root_fallback: bool, always_process_payload_attributes_on_canonical_head: bool, prewarm_max_concurrency: usize, allow_unwind_canonical_header: bool, + storage_worker_count: usize, + account_worker_count: usize, ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); Self { persistence_threshold, memory_block_buffer_target, @@ -187,6 +219,8 @@ state_root_fallback, always_process_payload_attributes_on_canonical_head, prewarm_max_concurrency, allow_unwind_canonical_header, + storage_worker_count, + account_worker_count, } }   @@ -394,6 +428,7 @@ pub const fn with_max_proof_task_concurrency( mut self, max_proof_task_concurrency: u64, ) -> Self { + assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1"); self.max_proof_task_concurrency = max_proof_task_concurrency; self } @@ -451,5 +486,27 @@ /// Return the prewarm max concurrency. pub const fn prewarm_max_concurrency(&self) -> usize { self.prewarm_max_concurrency + } + + /// Return the number of storage proof worker threads. + pub const fn storage_worker_count(&self) -> usize { + self.storage_worker_count + } + + /// Setter for the number of storage proof worker threads. 
+ pub fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self { + self.storage_worker_count = storage_worker_count.max(MIN_WORKER_COUNT); + self + } + + /// Return the number of account proof worker threads. + pub const fn account_worker_count(&self) -> usize { + self.account_worker_count + } + + /// Setter for the number of account proof worker threads. + pub fn with_account_worker_count(mut self, account_worker_count: usize) -> Self { + self.account_worker_count = account_worker_count.max(MIN_WORKER_COUNT); + self } }
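The new worker-count knobs default from available parallelism and can be overridden on `TreeConfig`; note that the setters shown above clamp explicit values up to `MIN_WORKER_COUNT` (32), so very small requests are raised to that floor. A small sketch of the expected behaviour, assuming `TreeConfig` is re-exported at the crate root as in upstream `reth_engine_primitives`:

```rust
use reth_engine_primitives::TreeConfig;

/// Hypothetical tuning helper illustrating the clamping behaviour of the setters.
fn tuned_tree_config() -> TreeConfig {
    let config = TreeConfig::default()
        .with_storage_worker_count(64) // kept as-is: above the 32-worker floor
        .with_account_worker_count(4); // clamped up to 32 by the setter

    debug_assert_eq!(config.storage_worker_count(), 64);
    debug_assert_eq!(config.account_worker_count(), 32);
    config
}
```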
diff --git reth/crates/engine/tree/benches/state_root_task.rs scroll-reth/crates/engine/tree/benches/state_root_task.rs index 9f61e62d2f9b3b0b584d7e27ecbf412e7e86d520..91a1213875580483bf79b927d017e95555dff108 100644 --- reth/crates/engine/tree/benches/state_root_task.rs +++ scroll-reth/crates/engine/tree/benches/state_root_task.rs @@ -66,11 +66,13 @@ transaction_id: 0, } } else { RevmAccount { + #[allow(clippy::needless_update)] info: AccountInfo { balance: U256::from(rng.random::<u64>()), nonce: rng.random::<u64>(), code_hash: KECCAK_EMPTY, code: Some(Default::default()), + ..Default::default() }, storage: (0..rng.random_range(0..=params.storage_slots_per_account)) .map(|_| { @@ -228,16 +230,22 @@ (genesis_hash, payload_processor, provider, state_updates) }, |(genesis_hash, mut payload_processor, provider, state_updates)| { black_box({ - let mut handle = payload_processor.spawn( - Default::default(), - core::iter::empty::< - Result<Recovered<TransactionSigned>, core::convert::Infallible>, - >(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::default(), - &TreeConfig::default(), - ); + let mut handle = payload_processor + .spawn( + Default::default(), + core::iter::empty::< + Result< + Recovered<TransactionSigned>, + core::convert::Infallible, + >, + >(), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + ConsistentDbView::new_with_latest_tip(provider).unwrap(), + TrieInput::default(), + &TreeConfig::default(), + ) + .map_err(|(err, ..)| err) + .expect("failed to spawn payload processor");   let mut state_hook = handle.state_hook();
diff --git reth/crates/engine/tree/src/test_utils.rs scroll-reth/crates/engine/tree/src/test_utils.rs index 2ec00f9b918d1ec73fb19b492f94742e943ce2d4..e011a54b73c234f18eefc0ba6e6f8f4e2f9c9664 100644 --- reth/crates/engine/tree/src/test_utils.rs +++ scroll-reth/crates/engine/tree/src/test_utils.rs @@ -3,9 +3,8 @@ use reth_chainspec::ChainSpec; use reth_ethereum_primitives::BlockBody; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives_traits::SealedHeader; -use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, +use reth_provider::test_utils::{ + create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; @@ -18,13 +17,12 @@ /// Test pipeline builder. #[derive(Default, Debug)] pub struct TestPipelineBuilder { pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>, - executor_results: Vec<ExecutionOutcome>, }   impl TestPipelineBuilder { /// Create a new [`TestPipelineBuilder`]. pub const fn new() -> Self { - Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() } + Self { pipeline_exec_outputs: VecDeque::new() } }   /// Set the pipeline execution outputs to use for the test consensus engine. @@ -37,8 +35,14 @@ self }   /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec<ExecutionOutcome>) -> Self { - self.executor_results = executor_results; + #[deprecated( + note = "no-op: executor results are not used and will be removed in a future release" + )] + pub fn with_executor_results( + self, + executor_results: Vec<reth_provider::ExecutionOutcome>, + ) -> Self { + let _ = executor_results; self }
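The test-utils change keeps `with_executor_results` for source compatibility but turns it into a deprecated no-op that discards its argument. A tiny sketch of that pattern with a stand-in builder type (illustrative only):

```rust
// Illustrative builder; only the deprecation pattern mirrors the diff above.
#[derive(Default, Debug)]
pub struct PipelineBuilder;

impl PipelineBuilder {
    /// Kept so existing call sites still compile; the argument is discarded.
    #[deprecated(note = "no-op: executor results are not used and will be removed")]
    pub fn with_executor_results(self, executor_results: Vec<u64>) -> Self {
        let _ = executor_results; // intentionally ignored
        self
    }
}
```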
diff --git reth/crates/engine/tree/src/tree/cached_state.rs scroll-reth/crates/engine/tree/src/tree/cached_state.rs index 9f4eb8398dfd9671f8c27aaae1c02373545a00ba..8553a9fe63c4fe3b46cc728fb8fefd51a4c63ced 100644 --- reth/crates/engine/tree/src/tree/cached_state.rs +++ scroll-reth/crates/engine/tree/src/tree/cached_state.rs @@ -1,5 +1,8 @@ //! Execution cache implementation for block processing. -use alloy_primitives::{Address, StorageKey, StorageValue, B256}; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashSet}, + Address, StorageKey, StorageValue, B256, +}; use metrics::Gauge; use mini_moka::sync::CacheBuilder; use reth_errors::ProviderResult; @@ -14,7 +17,6 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; -use revm_primitives::map::DefaultHashBuilder; use std::{sync::Arc, time::Duration}; use tracing::trace;   @@ -300,65 +302,69 @@ pub(crate) struct ExecutionCache { /// Cache for contract bytecode, keyed by code hash. code_cache: Cache<B256, Option<Bytecode>>,   - /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s - /// storage slots. - storage_cache: Cache<Address, AccountStorageCache>, + /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to + /// values. + storage_cache: Cache<(Address, StorageKey), Option<StorageValue>>,   /// Cache for basic account information (nonce, balance, code hash). account_cache: Cache<Address, Option<Account>>, }   impl ExecutionCache { - /// Get storage value from hierarchical cache. + /// Get storage value from flattened cache. /// /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The account's storage cache doesn't exist - /// - `Empty`: The slot exists in the account's cache but is empty + /// - `NotCached`: The storage slot is not in the cache + /// - `Empty`: The slot exists in the cache but is empty /// - `Value`: The slot exists and has a specific value pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { - match self.storage_cache.get(address) { + match self.storage_cache.get(&(*address, *key)) { None => SlotStatus::NotCached, - Some(account_cache) => account_cache.get_storage(key), + Some(None) => SlotStatus::Empty, + Some(Some(value)) => SlotStatus::Value(value), } }   - /// Insert storage value into hierarchical cache + /// Insert storage value into flattened cache pub(crate) fn insert_storage( &self, address: Address, key: StorageKey, value: Option<StorageValue>, ) { - self.insert_storage_bulk(address, [(key, value)]); + self.storage_cache.insert((address, key), value); }   - /// Insert multiple storage values into hierarchical cache for a single account + /// Insert multiple storage values into flattened cache for a single account /// - /// This method is optimized for inserting multiple storage values for the same address - /// by doing the account cache lookup only once instead of for each key-value pair. + /// This method inserts multiple storage values for the same address directly + /// into the flattened cache. 
pub(crate) fn insert_storage_bulk<I>(&self, address: Address, storage_entries: I) where I: IntoIterator<Item = (StorageKey, Option<StorageValue>)>, { - let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| { - let account_cache = AccountStorageCache::default(); - self.storage_cache.insert(address, account_cache.clone()); - account_cache - }); - for (key, value) in storage_entries { - account_cache.insert_storage(key, value); + self.storage_cache.insert((address, key), value); } }   - /// Invalidate storage for specific account - pub(crate) fn invalidate_account_storage(&self, address: &Address) { - self.storage_cache.invalidate(address); - } - /// Returns the total number of storage slots cached across all accounts pub(crate) fn total_storage_slots(&self) -> usize { - self.storage_cache.iter().map(|addr| addr.len()).sum() + self.storage_cache.entry_count() as usize + } + + /// Invalidates the storage for all addresses in the set + pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) { + // NOTE: this must collect because the invalidate function should not be called while we + // hold an iter for it + let storage_entries = self + .storage_cache + .iter() + .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key())) + .collect::<Vec<_>>(); + for key in storage_entries { + self.storage_cache.invalidate(&key) + } }   /// Inserts the post-execution state changes into the cache. @@ -385,6 +391,7 @@ for (code_hash, bytecode) in &state_updates.contracts { self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone()))); }   + let mut invalidated_accounts = HashSet::default(); for (addr, account) in &state_updates.state { // If the account was not modified, as in not changed and not destroyed, then we have // nothing to do w.r.t. this particular account and can move on @@ -397,7 +404,7 @@ if account.was_destroyed() { // Invalidate the account cache entry if destroyed self.account_cache.invalidate(addr);   - self.invalidate_account_storage(addr); + invalidated_accounts.insert(addr); continue }   @@ -423,6 +430,9 @@ // Insert will update if present, so we just use the new account info as the new value // for the account cache self.account_cache.insert(*addr, Some(Account::from(account_info))); } + + // invalidate storage for all destroyed accounts + self.invalidate_storages(invalidated_accounts);   Ok(()) } @@ -452,11 +462,11 @@ const EXPIRY_TIME: Duration = Duration::from_secs(7200); // 2 hours const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour   let storage_cache = CacheBuilder::new(self.storage_cache_entries) - .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 { - // values based on results from measure_storage_cache_overhead test - let base_weight = 39_000; - let slots_weight = value.len() * 218; - (base_weight + slots_weight) as u32 + .weigher(|_key: &(Address, StorageKey), _value: &Option<StorageValue>| -> u32 { + // Size of composite key (Address + StorageKey) + Option<StorageValue> + // Address: 20 bytes, StorageKey: 32 bytes, Option<StorageValue>: 33 bytes + // Plus some overhead for the hash map entry + 120_u32 }) .max_capacity(storage_cache_size) .time_to_live(EXPIRY_TIME) @@ -573,56 +583,6 @@ self.usage_guard.clone() } }   -/// Cache for an individual account's storage slots. -/// -/// This represents the second level of the hierarchical storage cache. -/// Each account gets its own `AccountStorageCache` to store accessed storage slots. 
-#[derive(Debug, Clone)] -pub(crate) struct AccountStorageCache { - /// Map of storage keys to their cached values. - slots: Cache<StorageKey, Option<StorageValue>>, -} - -impl AccountStorageCache { - /// Create a new [`AccountStorageCache`] - pub(crate) fn new(max_slots: u64) -> Self { - Self { - slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()), - } - } - - /// Get a storage value from this account's cache. - /// - `NotCached`: The slot is not in the cache - /// - `Empty`: The slot is empty - /// - `Value`: The slot has a specific value - pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus { - match self.slots.get(key) { - None => SlotStatus::NotCached, - Some(None) => SlotStatus::Empty, - Some(Some(value)) => SlotStatus::Value(value), - } - } - - /// Insert a storage value - pub(crate) fn insert_storage(&self, key: StorageKey, value: Option<StorageValue>) { - self.slots.insert(key, value); - } - - /// Returns the number of slots in the cache - pub(crate) fn len(&self) -> usize { - self.slots.entry_count() as usize - } -} - -impl Default for AccountStorageCache { - fn default() -> Self { - // With weigher and max_capacity in place, this number represents - // the maximum number of entries that can be stored, not the actual - // memory usage which is controlled by storage cache's max_capacity. - Self::new(1_000_000) - } -} - #[cfg(test)] mod tests { use super::*; @@ -697,32 +657,36 @@ }   #[test] fn measure_storage_cache_overhead() { - let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000)); - println!("Base AccountStorageCache overhead: {base_overhead} bytes"); + let (base_overhead, cache) = + measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000)); + println!("Base ExecutionCache overhead: {base_overhead} bytes"); let mut rng = rand::rng();   + let address = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::<u128>()); let (first_slot, _) = measure_allocation(|| { - cache.insert_storage(key, Some(value)); + cache.insert_storage(address, key, Some(value)); }); println!("First slot insertion overhead: {first_slot} bytes");   const TOTAL_SLOTS: usize = 10_000; let (test_slots, _) = measure_allocation(|| { for _ in 0..TOTAL_SLOTS { + let addr = Address::random(); let key = StorageKey::random(); let value = StorageValue::from(rng.random::<u128>()); - cache.insert_storage(key, Some(value)); + cache.insert_storage(addr, key, Some(value)); } }); println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS);   println!("\nTheoretical sizes:"); + println!("Address size: {} bytes", size_of::<Address>()); println!("StorageKey size: {} bytes", size_of::<StorageKey>()); println!("StorageValue size: {} bytes", size_of::<StorageValue>()); println!("Option<StorageValue> size: {} bytes", size_of::<Option<StorageValue>>()); - println!("Option<B256> size: {} bytes", size_of::<Option<B256>>()); + println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>()); }   #[test]
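The cached-state refactor above replaces the two-level account-to-slots cache with a single cache keyed by `(Address, StorageKey)`, and invalidates destroyed accounts by first collecting the matching keys and only then removing them. A minimal model of that shape, using std maps instead of the `mini_moka` cache and simplified key/value types, purely for illustration:

```rust
use std::collections::{HashMap, HashSet};

// Simplified stand-ins for the real key and value types.
type Address = [u8; 20];
type StorageKey = u64;
type StorageValue = u128;

/// Mirrors the three-way `SlotStatus` distinction described in the diff.
#[derive(Debug, PartialEq)]
enum SlotStatus {
    NotCached,
    Empty,
    Value(StorageValue),
}

/// Minimal model of the flattened cache: one map keyed by `(address, slot)`
/// instead of a per-account inner cache.
#[derive(Default)]
struct FlatStorageCache {
    slots: HashMap<(Address, StorageKey), Option<StorageValue>>,
}

impl FlatStorageCache {
    fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus {
        match self.slots.get(&(*address, *key)) {
            None => SlotStatus::NotCached,
            Some(None) => SlotStatus::Empty,
            Some(Some(value)) => SlotStatus::Value(*value),
        }
    }

    fn insert_storage(&mut self, address: Address, key: StorageKey, value: Option<StorageValue>) {
        self.slots.insert((address, key), value);
    }

    /// Invalidation for destroyed accounts: collect the matching keys first,
    /// then remove them, mirroring the two-phase approach in the diff.
    fn invalidate_storages(&mut self, addresses: &HashSet<Address>) {
        let keys: Vec<_> = self
            .slots
            .keys()
            .copied()
            .filter(|(addr, _)| addresses.contains(addr))
            .collect();
        for key in keys {
            self.slots.remove(&key);
        }
    }
}

fn main() {
    let mut cache = FlatStorageCache::default();
    let (alice, slot) = ([1u8; 20], 7);

    assert_eq!(cache.get_storage(&alice, &slot), SlotStatus::NotCached);
    cache.insert_storage(alice, slot, None);
    assert_eq!(cache.get_storage(&alice, &slot), SlotStatus::Empty);
    cache.insert_storage(alice, slot, Some(42));
    assert_eq!(cache.get_storage(&alice, &slot), SlotStatus::Value(42));

    cache.invalidate_storages(&HashSet::from([alice]));
    assert_eq!(cache.get_storage(&alice, &slot), SlotStatus::NotCached);
}
```

The flat key also simplifies the weigher: each entry has a roughly constant size, so the cache no longer needs to estimate a per-account overhead.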
diff --git reth/crates/engine/tree/src/tree/metrics.rs scroll-reth/crates/engine/tree/src/tree/metrics.rs index 4d3310543d1c87a2f9ade42c8da1e5842c33c80b..c014d8ba15e427134ce9ca61dbe28d038ac19b26 100644 --- reth/crates/engine/tree/src/tree/metrics.rs +++ scroll-reth/crates/engine/tree/src/tree/metrics.rs @@ -122,6 +122,10 @@ /// The number of reorgs pub reorgs: Counter, /// The latest reorg depth pub latest_reorg_depth: Gauge, + /// The current safe block height (this is required by optimism) + pub safe_block_height: Gauge, + /// The current finalized block height (this is required by optimism) + pub finalized_block_height: Gauge, }   /// Metrics for the `EngineApi`. @@ -310,6 +314,7 @@ BlockExecutionResult { receipts: vec![], requests: Requests::default(), gas_used: 1000, + blob_gas_used: 0, }, )) }
diff --git reth/crates/engine/tree/src/tree/mod.rs scroll-reth/crates/engine/tree/src/tree/mod.rs index 24bdc069f098e60399b609943190a16756237ea9..7f1183f5efc1cd2eae6f0c0c193a225ea22ac114 100644 --- reth/crates/engine/tree/src/tree/mod.rs +++ scroll-reth/crates/engine/tree/src/tree/mod.rs @@ -1015,23 +1015,79 @@ attrs: Option<T::PayloadAttributes>, version: EngineApiMessageVersion, ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); + + // Record metrics + self.record_forkchoice_metrics(&attrs); + + // Pre-validation of forkchoice state + if let Some(early_result) = self.validate_forkchoice_state(state)? { + return Ok(TreeOutcome::new(early_result)); + } + + // Return early if we are on the correct fork + if let Some(result) = self.handle_canonical_head(state, &attrs, version)? { + return Ok(result); + } + + // Attempt to apply a chain update when the head differs from our canonical chain. + // This handles reorgs and chain extensions by making the specified head canonical. + if let Some(result) = self.apply_chain_update(state, &attrs, version)? { + return Ok(result); + } + + // Fallback that ensures to catch up to the network's state. + self.handle_missing_block(state) + } + + /// Records metrics for forkchoice updated calls + fn record_forkchoice_metrics(&self, attrs: &Option<T::PayloadAttributes>) { self.metrics.engine.forkchoice_updated_messages.increment(1); if attrs.is_some() { self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); } self.canonical_in_memory_state.on_forkchoice_update_received(); + }   - if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { - return Ok(TreeOutcome::new(on_updated)) + /// Pre-validates the forkchoice state and returns early if validation fails. + /// + /// Returns `Some(OnForkChoiceUpdated)` if validation fails and an early response should be + /// returned. Returns `None` if validation passes and processing should continue. + fn validate_forkchoice_state( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult<Option<OnForkChoiceUpdated>> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())); + } + + // Check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))); + } + + if !self.backfill_sync_state.is_idle() { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())); }   - let valid_outcome = |head| { - TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(head), - ))) - }; + Ok(None) + }   + /// Handles the case where the forkchoice head is already canonical. + /// + /// Returns `Some(TreeOutcome<OnForkChoiceUpdated>)` if the head is already canonical and + /// processing is complete. Returns `None` if the head is not canonical and processing + /// should continue. 
+ fn handle_canonical_head( + &self, + state: ForkchoiceState, + attrs: &Option<T::PayloadAttributes>, // Changed to reference + version: EngineApiMessageVersion, + ) -> ProviderResult<Option<TreeOutcome<OnForkChoiceUpdated>>> { // Process the forkchoice update by trying to make the head block canonical // // We can only process this forkchoice update if: @@ -1046,34 +1102,58 @@ // - updating canonical state trackers // - emitting a canonicalization event for the new chain (including reorg) // - if we have payload attributes, delegate them to the payload service   - // 1. ensure we have a new head block - if self.state.tree_state.canonical_block_hash() == state.head_block_hash { - trace!(target: "engine::tree", "fcu head hash is already canonical"); + if self.state.tree_state.canonical_block_hash() != state.head_block_hash { + return Ok(None); + }   - // update the safe and finalized blocks and ensure their values are valid - if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { - // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) - } + trace!(target: "engine::tree", "fcu head hash is already canonical");   - // we still need to process payload attributes if the head is already canonical - if let Some(attr) = attrs { - let tip = self - .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? - .ok_or_else(|| { - // If we can't find the canonical block, then something is wrong and we need - // to return an error - ProviderError::HeaderNotFound(state.head_block_hash.into()) - })?; - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) - } + // Update the safe and finalized blocks and ensure their values are valid + if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { + // safe or finalized hashes are invalid + return Ok(Some(TreeOutcome::new(outcome))); + }   - // the head block is already canonical - return Ok(valid_outcome(state.head_block_hash)) + // Process payload attributes if the head is already canonical + if let Some(attr) = attrs { + let tip = self + .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())? + .ok_or_else(|| { + // If we can't find the canonical block, then something is wrong and we need + // to return an error + ProviderError::HeaderNotFound(state.head_block_hash.into()) + })?; + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); }   - // 2. check if the head is already part of the canonical chain + // The head block is already canonical + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + Ok(Some(outcome)) + } + + /// Applies chain update for the new head block and processes payload attributes. + /// + /// This method handles the case where the forkchoice head differs from our current canonical + /// head. It attempts to make the specified head block canonical by: + /// - Checking if the head is already part of the canonical chain + /// - Applying chain reorganizations (reorgs) if necessary + /// - Processing payload attributes if provided + /// - Returning the appropriate forkchoice update response + /// + /// Returns `Some(TreeOutcome<OnForkChoiceUpdated>)` if a chain update was successfully applied. + /// Returns `None` if no chain update was needed or possible. 
+ fn apply_chain_update( + &mut self, + state: ForkchoiceState, + attrs: &Option<T::PayloadAttributes>, + version: EngineApiMessageVersion, + ) -> ProviderResult<Option<TreeOutcome<OnForkChoiceUpdated>>> { + // Check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical");   @@ -1084,9 +1164,14 @@ self.config.always_process_payload_attributes_on_canonical_head() { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); - let updated = - self.process_payload_attributes(attr, &canonical_header, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes( + attr.clone(), + &canonical_header, + state, + version, + ); + return Ok(Some(TreeOutcome::new(updated))); }   // At this point, no alternative block has been triggered, so we need effectively @@ -1095,52 +1180,75 @@ // chain. We need to update the latest block state to reflect the // canonical ancestor. This ensures that state providers and the // transaction pool operate with the correct chain state after // forkchoice update processing. + if self.config.unwind_canonical_header() { self.update_latest_block_to_canonical_ancestor(&canonical_header)?; } }   - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` - // ancestor of the head of canonical chain, i.e. the ancestor passed payload - // validation process and deemed `VALID`. In the case of such an event, client - // software MUST return `{payloadStatus: {status: VALID, latestValidHash: - // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + // According to the Engine API specification, client software MAY skip an update of the + // forkchoice state and MUST NOT begin a payload build process if + // `forkchoiceState.headBlockHash` references a `VALID` ancestor of the head + // of canonical chain, i.e. the ancestor passed payload validation process + // and deemed `VALID`. In the case of such an event, client software MUST + // return `{payloadStatus: {status: VALID, latestValidHash: + // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}` + + // The head block is already canonical and we're not processing payload attributes, + // so we're not triggering a payload job and can return right away   - // the head block is already canonical, so we're not triggering a payload job and can - // return right away - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); }   - // 3. ensure we can apply a new chain update for the head block + // Ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)? 
{ let tip = chain_update.tip().clone_sealed_header(); self.on_canonical_chain_update(chain_update);   - // update the safe and finalized blocks and ensure their values are valid + // Update the safe and finalized blocks and ensure their values are valid if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) + return Ok(Some(TreeOutcome::new(outcome))); }   if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state, version); - return Ok(TreeOutcome::new(updated)) + // Clone only when we actually need to process the attributes + let updated = self.process_payload_attributes(attr.clone(), &tip, state, version); + return Ok(Some(TreeOutcome::new(updated))); }   - return Ok(valid_outcome(state.head_block_hash)) + let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(state.head_block_hash), + ))); + return Ok(Some(outcome)); }   - // 4. we don't have the block to perform the update - // we assume the FCU is valid and at least the head is missing, + Ok(None) + } + + /// Handles the case where the head block is missing and needs to be downloaded. + /// + /// This is the fallback case when all other forkchoice update scenarios have been exhausted. + /// Returns a `TreeOutcome` with syncing status and download event. + fn handle_missing_block( + &self, + state: ForkchoiceState, + ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> { + // We don't have the block to perform the forkchoice update + // We assume the FCU is valid and at least the head is missing, // so we need to start syncing to it // // find the appropriate target to sync to, if we don't have the safe block hash then we // start syncing to the safe block via backfill first let target = if self.state.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() + // check that safe block is valid and missing + !state.safe_block_hash.is_zero() && + self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none() { debug!(target: "engine::tree", "missing safe block on initial FCU, downloading safe block"); state.safe_block_hash @@ -1929,8 +2037,18 @@ /// to a forkchoice update. 
fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult<Option<PayloadStatus>> { // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent)?)) + + // Try to prepare invalid response, but handle errors gracefully + match self.prepare_invalid_response(header.parent) { + Ok(status) => Ok(Some(status)), + Err(err) => { + debug!(target: "engine::tree", %err, "Failed to prepare invalid response for ancestor check"); + // Return a basic invalid status without latest valid hash + Ok(Some(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }))) + } + } }   /// Validate if block is correct and satisfies all the consensus rules that concern the header @@ -2687,7 +2805,9 @@ { // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: <https://github.com/ethereum-optimism/optimism/blob/c383eb880f307caa3ca41010ec10f30f08396b2e/op-node/rollup/sync/start.go#L65-L65> let _ = self.persistence.save_finalized_block_number(finalized.number()); - self.canonical_in_memory_state.set_finalized(finalized); + self.canonical_in_memory_state.set_finalized(finalized.clone()); + // Update finalized block height metric + self.metrics.tree.finalized_block_height.set(finalized.number() as f64); } } Err(err) => { @@ -2715,7 +2835,9 @@ if Some(safe.num_hash()) != self.canonical_in_memory_state.get_safe_num_hash() { // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: <https://github.com/ethereum-optimism/optimism/blob/c383eb880f307caa3ca41010ec10f30f08396b2e/op-node/rollup/sync/start.go#L65-L65> let _ = self.persistence.save_safe_block_number(safe.number()); - self.canonical_in_memory_state.set_safe(safe); + self.canonical_in_memory_state.set_safe(safe.clone()); + // Update safe block height metric + self.metrics.tree.safe_block_height.set(safe.number() as f64); } } Err(err) => { @@ -2751,35 +2873,6 @@ // // This ensures that the safe block is consistent with the head block, i.e. the safe // block is an ancestor of the head block. self.update_safe_block(state.safe_block_hash) - } - - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult<Option<OnForkChoiceUpdated>> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? 
{ - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if !self.backfill_sync_state.is_idle() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) }   /// Validates the payload attributes with respect to the header and fork choice state.
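The large `tree/mod.rs` change splits `on_forkchoice_updated` into stage helpers that either finish the update or hand off to the next stage. The resulting control flow reduces to the early-return chain sketched below; all types and helper bodies are placeholders, only the shape mirrors the diff.

```rust
// Control-flow sketch of the refactored forkchoice-update pipeline.
#[derive(Debug)]
struct ForkchoiceState;
#[derive(Debug)]
struct Outcome(&'static str);
type TreeResult<T> = Result<T, String>;

struct Tree;

impl Tree {
    fn on_forkchoice_updated(&mut self, state: &ForkchoiceState) -> TreeResult<Outcome> {
        self.record_metrics(state);
        // Each stage either finishes the update (Some) or defers to the next (None).
        if let Some(outcome) = self.validate_state(state)? {
            return Ok(outcome);
        }
        if let Some(outcome) = self.handle_canonical_head(state)? {
            return Ok(outcome);
        }
        if let Some(outcome) = self.apply_chain_update(state)? {
            return Ok(outcome);
        }
        // Fallback: the head is unknown locally, so start syncing towards it.
        self.handle_missing_block(state)
    }

    fn record_metrics(&self, _state: &ForkchoiceState) {}

    fn validate_state(&self, _state: &ForkchoiceState) -> TreeResult<Option<Outcome>> {
        Ok(None) // zero head hash or active backfill would return Some(..) here
    }

    fn handle_canonical_head(&self, _state: &ForkchoiceState) -> TreeResult<Option<Outcome>> {
        Ok(None) // head already canonical -> Some(valid outcome)
    }

    fn apply_chain_update(&mut self, _state: &ForkchoiceState) -> TreeResult<Option<Outcome>> {
        Ok(None) // known head on a side chain -> make it canonical, Some(valid outcome)
    }

    fn handle_missing_block(&self, _state: &ForkchoiceState) -> TreeResult<Outcome> {
        Ok(Outcome("syncing")) // unknown head -> request download, report SYNCING
    }
}

fn main() {
    let mut tree = Tree;
    println!("{:?}", tree.on_forkchoice_updated(&ForkchoiceState));
}
```

One behavioral consequence of the split is that payload attributes are now passed by reference and cloned only on the paths that actually process them.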
diff --git reth/crates/engine/tree/src/tree/payload_processor/mod.rs scroll-reth/crates/engine/tree/src/tree/payload_processor/mod.rs index 8d9bd1ba2e0e40136d5b82d64d0cb9966291ca0a..e3090d60756d017ad248e421b3e6a83813d18247 100644 --- reth/crates/engine/tree/src/tree/payload_processor/mod.rs +++ scroll-reth/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -32,7 +32,7 @@ }; use reth_revm::{db::BundleState, state::EvmState}; use reth_trie::TrieInput; use reth_trie_parallel::{ - proof_task::{ProofTaskCtx, ProofTaskManager}, + proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, }; use reth_trie_sparse::{ @@ -45,7 +45,7 @@ atomic::AtomicBool, mpsc::{self, channel, Sender}, Arc, }; -use tracing::{debug, instrument}; +use tracing::{debug, instrument, warn};   mod configured_sparse_trie; pub mod executor; @@ -166,6 +166,7 @@ /// /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) + #[allow(clippy::type_complexity)] pub fn spawn<P, I: ExecutableTxIterator<Evm>>( &mut self, env: ExecutionEnv<Evm>, @@ -174,7 +175,10 @@ provider_builder: StateProviderBuilder<N, P>, consistent_view: ConsistentDbView<P>, trie_input: TrieInput, config: &TreeConfig, - ) -> PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error> + ) -> Result< + PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error>, + (ParallelStateRootError, I, ExecutionEnv<Evm>, StateProviderBuilder<N, P>), + > where P: DatabaseProviderFactory<Provider: BlockReader> + BlockReader @@ -185,8 +189,7 @@ + 'static, { let (to_sparse_trie, sparse_trie_rx) = channel(); // spawn multiproof task, save the trie input - let (trie_input, state_root_config) = - MultiProofConfig::new_from_input(consistent_view, trie_input); + let (trie_input, state_root_config) = MultiProofConfig::from_input(trie_input); self.trie_input = Some(trie_input);   // Create and spawn the storage proof task @@ -195,12 +198,15 @@ state_root_config.nodes_sorted.clone(), state_root_config.state_sorted.clone(), state_root_config.prefix_sets.clone(), ); + let storage_worker_count = config.storage_worker_count(); + let account_worker_count = config.account_worker_count(); let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize; - let proof_task = ProofTaskManager::new( + let proof_handle = ProofWorkerHandle::new( self.executor.handle().clone(), - state_root_config.consistent_view.clone(), + consistent_view, task_ctx, - max_proof_task_concurrency, + storage_worker_count, + account_worker_count, );   // We set it to half of the proof task concurrency, because often for each multiproof we @@ -209,7 +215,7 @@ let max_multi_proof_task_concurrency = max_proof_task_concurrency / 2; let multi_proof_task = MultiProofTask::new( state_root_config, self.executor.clone(), - proof_task.handle(), + proof_handle.clone(), to_sparse_trie, max_multi_proof_task_concurrency, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), @@ -238,26 +244,14 @@ // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel();   // Spawn the sparse trie task using any stored trie and parallel trie configuration. 
- self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx); - - // spawn the proof task - self.executor.spawn_blocking(move || { - if let Err(err) = proof_task.run() { - // At least log if there is an error at any point - tracing::error!( - target: "engine::root", - ?err, - "Storage proof task returned an error" - ); - } - }); + self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx);   - PayloadHandle { + Ok(PayloadHandle { to_multi_proof, prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, - } + }) }   /// Spawns a task that exclusively handles cache prewarming for transaction execution. @@ -392,7 +386,7 @@ /// Spawns the [`SparseTrieTask`] for this payload processor. fn spawn_sparse_trie_task<BPF>( &self, sparse_trie_rx: mpsc::Receiver<SparseTrieUpdate>, - proof_task_handle: BPF, + proof_worker_handle: BPF, state_root_tx: mpsc::Sender<Result<StateRootComputeOutcome, ParallelStateRootError>>, ) where BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, @@ -422,7 +416,7 @@ let task = SparseTrieTask::<_, ConfiguredSparseTrie, ConfiguredSparseTrie>::new_with_cleared_trie( sparse_trie_rx, - proof_task_handle, + proof_worker_handle, self.trie_metrics.clone(), sparse_state_trie, ); @@ -857,14 +851,20 @@ &TreeConfig::default(), PrecompileCacheMap::default(), ); let provider = BlockchainProvider::new(factory).unwrap(); - let mut handle = payload_processor.spawn( - Default::default(), - core::iter::empty::<Result<Recovered<TransactionSigned>, core::convert::Infallible>>(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - ConsistentDbView::new_with_latest_tip(provider).unwrap(), - TrieInput::from_state(hashed_state), - &TreeConfig::default(), - ); + let mut handle = + payload_processor + .spawn( + Default::default(), + core::iter::empty::< + Result<Recovered<TransactionSigned>, core::convert::Infallible>, + >(), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + ConsistentDbView::new_with_latest_tip(provider).unwrap(), + TrieInput::from_state(hashed_state), + &TreeConfig::default(), + ) + .map_err(|(err, ..)| err) + .expect("failed to spawn payload processor");   let mut state_hook = handle.state_hook();
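With this change `PayloadProcessor::spawn` becomes fallible and, on failure, hands the execution environment, transaction iterator, and provider builder back to the caller so they can be reused on a fallback path. The general shape of that error contract, with hypothetical types standing in for the real ones:

```rust
// Sketch of the "return ownership on failure" contract used by the new
// `spawn` signature above; all names here are placeholders.
#[derive(Debug)]
struct SpawnError(String);

struct Env(u64);
struct Txs(Vec<u64>);
struct Handle;

/// On success the inputs are consumed; on failure they come back alongside the
/// error so the caller can retry via a different strategy without cloning.
fn try_spawn(env: Env, txs: Txs) -> Result<Handle, (SpawnError, Env, Txs)> {
    let workers_available = false; // stand-in for proof-worker startup
    if workers_available {
        Ok(Handle)
    } else {
        Err((SpawnError("failed to start proof workers".into()), env, txs))
    }
}

fn spawn_fallback(env: Env, txs: Txs) -> Handle {
    // Stand-in for a cache-prewarm-only spawn with the state root computed later.
    println!("prewarm-only spawn for block {} with {} txs", env.0, txs.0.len());
    Handle
}

fn main() {
    let handle = match try_spawn(Env(1), Txs(vec![1, 2, 3])) {
        Ok(handle) => handle,
        Err((err, env, txs)) => {
            eprintln!("spawn failed: {err:?}, falling back");
            spawn_fallback(env, txs)
        }
    };
    let _ = handle;
}
```

The benchmark and test call sites in the diff simply unwrap the error with `.map_err(|(err, ..)| err).expect(..)`, while the payload validator (further down) uses the recovered inputs for its fallback path.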
diff --git reth/crates/engine/tree/src/tree/payload_processor/multiproof.rs scroll-reth/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 6c7f5de40a38dd9e6fad9a10e8093cec7cc2d531..a528b7595701178d48e47e53abe743db44a3923a 100644 --- reth/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ scroll-reth/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -12,14 +12,17 @@ use derive_more::derive::Deref; use metrics::Histogram; use reth_errors::ProviderError; use reth_metrics::Metrics; -use reth_provider::{providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, FactoryTx}; use reth_revm::state::EvmState; use reth_trie::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted, HashedStorage, MultiProofTargets, TrieInput, }; -use reth_trie_parallel::{proof::ParallelProof, proof_task::ProofTaskManagerHandle}; +use reth_trie_parallel::{ + proof::ParallelProof, + proof_task::{AccountMultiproofInput, ProofWorkerHandle}, + root::ParallelStateRootError, +}; use std::{ collections::{BTreeMap, VecDeque}, ops::DerefMut, @@ -62,9 +65,7 @@ }   /// Common configuration for multi proof tasks #[derive(Debug, Clone)] -pub(super) struct MultiProofConfig<Factory> { - /// View over the state in the database. - pub consistent_view: ConsistentDbView<Factory>, +pub(super) struct MultiProofConfig { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc<TrieUpdatesSorted>, @@ -76,17 +77,13 @@ /// if we have cached nodes for them. pub prefix_sets: Arc<TriePrefixSetsMut>, }   -impl<Factory> MultiProofConfig<Factory> { - /// Creates a new state root config from the consistent view and the trie input. +impl MultiProofConfig { + /// Creates a new state root config from the trie input. /// /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the /// [`TrieInput`]. - pub(super) fn new_from_input( - consistent_view: ConsistentDbView<Factory>, - mut input: TrieInput, - ) -> (TrieInput, Self) { + pub(super) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { let config = Self { - consistent_view, nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), state_sorted: Arc::new(input.state.drain_into_sorted()), prefix_sets: Arc::new(input.prefix_sets.clone()), @@ -245,14 +242,14 @@ }   /// A pending multiproof task, either [`StorageMultiproofInput`] or [`MultiproofInput`]. #[derive(Debug)] -enum PendingMultiproofTask<Factory> { +enum PendingMultiproofTask { /// A storage multiproof task input. - Storage(StorageMultiproofInput<Factory>), + Storage(StorageMultiproofInput), /// A regular multiproof task input. - Regular(MultiproofInput<Factory>), + Regular(MultiproofInput), }   -impl<Factory> PendingMultiproofTask<Factory> { +impl PendingMultiproofTask { /// Returns the proof sequence number of the task. 
const fn proof_sequence_number(&self) -> u64 { match self { @@ -278,22 +275,22 @@ } } }   -impl<Factory> From<StorageMultiproofInput<Factory>> for PendingMultiproofTask<Factory> { - fn from(input: StorageMultiproofInput<Factory>) -> Self { +impl From<StorageMultiproofInput> for PendingMultiproofTask { + fn from(input: StorageMultiproofInput) -> Self { Self::Storage(input) } }   -impl<Factory> From<MultiproofInput<Factory>> for PendingMultiproofTask<Factory> { - fn from(input: MultiproofInput<Factory>) -> Self { +impl From<MultiproofInput> for PendingMultiproofTask { + fn from(input: MultiproofInput) -> Self { Self::Regular(input) } }   /// Input parameters for spawning a dedicated storage multiproof calculation. #[derive(Debug)] -struct StorageMultiproofInput<Factory> { - config: MultiProofConfig<Factory>, +struct StorageMultiproofInput { + config: MultiProofConfig, source: Option<StateChangeSource>, hashed_state_update: HashedPostState, hashed_address: B256, @@ -303,7 +300,7 @@ state_root_message_sender: Sender<MultiProofMessage>, multi_added_removed_keys: Arc<MultiAddedRemovedKeys>, }   -impl<Factory> StorageMultiproofInput<Factory> { +impl StorageMultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -315,8 +312,8 @@ }   /// Input parameters for spawning a multiproof calculation. #[derive(Debug)] -struct MultiproofInput<Factory> { - config: MultiProofConfig<Factory>, +struct MultiproofInput { + config: MultiProofConfig, source: Option<StateChangeSource>, hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, @@ -325,7 +322,7 @@ state_root_message_sender: Sender<MultiProofMessage>, multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>, }   -impl<Factory> MultiproofInput<Factory> { +impl MultiproofInput { /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. fn send_empty_proof(self) { let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { @@ -340,17 +337,17 @@ /// Takes care of not having more calculations in flight than a given maximum /// concurrency, further calculation requests are queued and spawn later, after /// availability has been signaled. #[derive(Debug)] -pub struct MultiproofManager<Factory: DatabaseProviderFactory> { +pub struct MultiproofManager { /// Maximum number of concurrent calculations. max_concurrent: usize, /// Currently running calculations. inflight: usize, /// Queued calculations. - pending: VecDeque<PendingMultiproofTask<Factory>>, + pending: VecDeque<PendingMultiproofTask>, /// Executor for tasks executor: WorkloadExecutor, - /// Sender to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + /// Handle to the proof worker pools (storage and account). + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. /// @@ -367,15 +364,12 @@ /// Metrics metrics: MultiProofTaskMetrics, }   -impl<Factory> MultiproofManager<Factory> -where - Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, -{ +impl MultiproofManager { /// Creates a new [`MultiproofManager`]. 
fn new( executor: WorkloadExecutor, metrics: MultiProofTaskMetrics, - storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + proof_worker_handle: ProofWorkerHandle, max_concurrent: usize, ) -> Self { Self { @@ -384,7 +378,7 @@ max_concurrent, executor, inflight: 0, metrics, - storage_proof_task_handle, + proof_worker_handle, missed_leaves_storage_roots: Default::default(), } } @@ -395,7 +389,7 @@ }   /// Spawns a new multiproof calculation or enqueues it for later if /// `max_concurrent` are already inflight. - fn spawn_or_queue(&mut self, input: PendingMultiproofTask<Factory>) { + fn spawn_or_queue(&mut self, input: PendingMultiproofTask) { // If there are no proof targets, we can just send an empty multiproof back immediately if input.proof_targets_is_empty() { debug!( @@ -429,7 +423,7 @@ }   /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage /// multiproof, and dispatching to `spawn_multiproof` otherwise. - fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask<Factory>) { + fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) { match input { PendingMultiproofTask::Storage(storage_input) => { self.spawn_storage_proof(storage_input); @@ -441,7 +435,7 @@ } }   /// Spawns a single storage proof calculation task. - fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput<Factory>) { + fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { let StorageMultiproofInput { config, source, @@ -453,7 +447,7 @@ state_root_message_sender, multi_added_removed_keys, } = storage_multiproof_input;   - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); + let storage_proof_worker_handle = self.proof_worker_handle.clone(); let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone();   self.executor.spawn_blocking(move || { @@ -468,12 +462,11 @@ "Starting dedicated storage proof calculation", ); let start = Instant::now(); let proof_result = ParallelProof::new( - config.consistent_view, config.nodes_sorted, config.state_sorted, config.prefix_sets, missed_leaves_storage_roots, - storage_proof_task_handle.clone(), + storage_proof_worker_handle, ) .with_branch_node_masks(true) .with_multi_added_removed_keys(Some(multi_added_removed_keys)) @@ -516,7 +509,7 @@ self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); }   /// Spawns a single multiproof calculation task. 
- fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput<Factory>) { + fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) { let MultiproofInput { config, source, @@ -526,7 +519,7 @@ proof_sequence_number, state_root_message_sender, multi_added_removed_keys, } = multiproof_input; - let storage_proof_task_handle = self.storage_proof_task_handle.clone(); + let account_proof_worker_handle = self.proof_worker_handle.clone(); let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone();   self.executor.spawn_blocking(move || { @@ -544,17 +537,32 @@ "Starting multiproof calculation", );   let start = Instant::now(); - let proof_result = ParallelProof::new( - config.consistent_view, - config.nodes_sorted, - config.state_sorted, - config.prefix_sets, + + // Extend prefix sets with targets + let frozen_prefix_sets = + ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets); + + // Queue account multiproof to worker pool + let input = AccountMultiproofInput { + targets: proof_targets, + prefix_sets: frozen_prefix_sets, + collect_branch_node_masks: true, + multi_added_removed_keys, missed_leaves_storage_roots, - storage_proof_task_handle.clone(), - ) - .with_branch_node_masks(true) - .with_multi_added_removed_keys(multi_added_removed_keys) - .decoded_multiproof(proof_targets); + }; + + let proof_result: Result<DecodedMultiProof, ParallelStateRootError> = (|| { + let receiver = account_proof_worker_handle + .dispatch_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; + + receiver + .recv() + .map_err(|_| { + ParallelStateRootError::Other("Account multiproof channel closed".into()) + })? + .map(|(proof, _stats)| proof) + })(); let elapsed = start.elapsed(); trace!( target: "engine::root", @@ -645,13 +653,13 @@ /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. /// This feeds updates to the sparse trie task. #[derive(Debug)] -pub(super) struct MultiProofTask<Factory: DatabaseProviderFactory> { +pub(super) struct MultiProofTask { /// The size of proof targets chunk to spawn in one calculation. /// /// If [`None`], then chunking is disabled. chunk_size: Option<usize>, /// Task configuration. - config: MultiProofConfig<Factory>, + config: MultiProofConfig, /// Receiver for state root related messages. rx: Receiver<MultiProofMessage>, /// Sender for state root related messages. @@ -665,20 +673,17 @@ multi_added_removed_keys: MultiAddedRemovedKeys, /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// Manages calculation of multiproofs. 
- multiproof_manager: MultiproofManager<Factory>, + multiproof_manager: MultiproofManager, /// multi proof task metrics metrics: MultiProofTaskMetrics, }   -impl<Factory> MultiProofTask<Factory> -where - Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, -{ +impl MultiProofTask { /// Creates a new multi proof task with the unified message channel pub(super) fn new( - config: MultiProofConfig<Factory>, + config: MultiProofConfig, executor: WorkloadExecutor, - proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>, + proof_worker_handle: ProofWorkerHandle, to_sparse_trie: Sender<SparseTrieUpdate>, max_concurrency: usize, chunk_size: Option<usize>, @@ -698,7 +703,7 @@ proof_sequencer: ProofSequencer::default(), multiproof_manager: MultiproofManager::new( executor, metrics.clone(), - proof_task_handle, + proof_worker_handle, max_concurrency, ), metrics, @@ -1202,44 +1207,31 @@ #[cfg(test)] mod tests { use super::*; use alloy_primitives::map::B256Set; - use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, BlockReader, + DatabaseProviderFactory, + }; use reth_trie::{MultiProof, TrieInput}; - use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofTaskManager}; + use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; - use std::sync::Arc;   - fn create_state_root_config<F>(factory: F, input: TrieInput) -> MultiProofConfig<F> - where - F: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, - { - let consistent_view = ConsistentDbView::new(factory, None); - let nodes_sorted = Arc::new(input.nodes.clone().into_sorted()); - let state_sorted = Arc::new(input.state.clone().into_sorted()); - let prefix_sets = Arc::new(input.prefix_sets); - - MultiProofConfig { consistent_view, nodes_sorted, state_sorted, prefix_sets } - } - - fn create_test_state_root_task<F>(factory: F) -> MultiProofTask<F> + fn create_test_state_root_task<F>(factory: F) -> MultiProofTask where F: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static, { let executor = WorkloadExecutor::default(); - let config = create_state_root_config(factory, TrieInput::default()); + let (_trie_input, config) = MultiProofConfig::from_input(TrieInput::default()); let task_ctx = ProofTaskCtx::new( config.nodes_sorted.clone(), config.state_sorted.clone(), config.prefix_sets.clone(), ); - let proof_task = ProofTaskManager::new( - executor.handle().clone(), - config.consistent_view.clone(), - task_ctx, - 1, - ); + let consistent_view = ConsistentDbView::new(factory, None); + let proof_handle = + ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1); let channel = channel();   - MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None) + MultiProofTask::new(config, executor, proof_handle, channel.0, 1, None) }   #[test]
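The multiproof task no longer owns a `ProofTaskManager` run loop; it dispatches work to pre-spawned worker pools through a cloneable handle and waits on a per-request channel. A self-contained sketch of that dispatch/receive flow using std channels; job and result types, and method names, are illustrative rather than reth's actual API.

```rust
use std::sync::{
    mpsc::{channel, Receiver, Sender},
    Arc, Mutex,
};
use std::thread;

// One queued proof job, carrying the channel on which to reply.
struct ProofJob {
    target: u64,
    respond_to: Sender<Result<u64, String>>,
}

#[derive(Clone)]
struct WorkerHandle {
    jobs: Sender<ProofJob>,
}

impl WorkerHandle {
    /// Spawn a fixed pool of worker threads draining a shared job queue.
    fn new(workers: usize) -> Self {
        let (tx, rx) = channel::<ProofJob>();
        let rx = Arc::new(Mutex::new(rx));
        for _ in 0..workers {
            let rx = Arc::clone(&rx);
            thread::spawn(move || loop {
                // Take the next job; stop once every handle has been dropped.
                let job = rx.lock().unwrap().recv();
                let Ok(job) = job else { break };
                // Stand-in for the actual multiproof computation.
                let _ = job.respond_to.send(Ok(job.target * 2));
            });
        }
        Self { jobs: tx }
    }

    /// Queue a job and hand back a receiver for its result, mirroring the
    /// `dispatch_account_multiproof(..) -> receiver` pattern in the diff.
    fn dispatch(&self, target: u64) -> Result<Receiver<Result<u64, String>>, String> {
        let (tx, rx) = channel();
        self.jobs
            .send(ProofJob { target, respond_to: tx })
            .map_err(|_| "proof worker pool unavailable".to_string())?;
        Ok(rx)
    }
}

fn main() {
    let handle = WorkerHandle::new(2);
    let rx = handle.dispatch(21).expect("dispatch");
    println!("proof result: {:?}", rx.recv().unwrap());
}
```

Because the handle carries no database generics, `MultiProofTask`, `MultiproofManager`, and the input structs drop their `Factory` type parameter, which is most of the churn in this file.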
diff --git reth/crates/engine/tree/src/tree/payload_validator.rs scroll-reth/crates/engine/tree/src/tree/payload_validator.rs index cd2c37d1e912623a391587df770a6e034c5b1b8a..f3525d8e9c71a7ed5300d193e7d93b0d95604695 100644 --- reth/crates/engine/tree/src/tree/payload_validator.rs +++ scroll-reth/crates/engine/tree/src/tree/payload_validator.rs @@ -19,7 +19,7 @@ use alloy_primitives::B256; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, }; -use reth_consensus::{ConsensusError, FullConsensus}; +use reth_consensus::{ConsensusError, FullConsensus, HeaderValidator}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, }; @@ -43,6 +43,7 @@ use reth_revm::db::State; use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, TrieInput}; use reth_trie_db::DatabaseHashedPostState; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use revm::context::Block; use std::{collections::HashMap, sync::Arc, time::Instant}; use tracing::{debug, debug_span, error, info, trace, warn};   @@ -467,7 +468,7 @@ Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure - if state_root == block.header().state_root() { + if self.consensus.validate_state_root(block.header(), state_root).is_ok() { maybe_state_root = Some((state_root, trie_updates, elapsed)) } else { warn!( @@ -492,13 +493,15 @@ &hashed_state, ctx.state(), ) { Ok(result) => { + let elapsed = root_time.elapsed(); info!( target: "engine::tree", block = ?block_num_hash, regular_state_root = ?result.0, + ?elapsed, "Regular root task finished" ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { debug!(target: "engine::tree", %error, "Parallel state root computation failed"); @@ -535,7 +538,7 @@ self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root");   // ensure state root matches - if state_root != block.header().state_root() { + if self.consensus.validate_state_root(block.header(), state_root).is_err() { // call post-block hook self.on_invalid_block( &parent_block, @@ -640,7 +643,7 @@ V: PayloadValidator<T, Block = N::Block>, T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>, Evm: ConfigureEngineEvm<T::ExecutionData, Primitives = N>, { - let num_hash = NumHash::new(env.evm_env.block_env.number.to(), env.hash); + let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash);   let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); let _enter = span.enter(); @@ -877,17 +880,36 @@ // Use state root task only if prefix sets are empty, otherwise proof generation is // too expensive because it requires walking all paths in every proof. 
let spawn_start = Instant::now(); let (handle, strategy) = if trie_input.prefix_sets.is_empty() { - ( - self.payload_processor.spawn( - env, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ), - StateRootStrategy::StateRootTask, - ) + match self.payload_processor.spawn( + env, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) { + Ok(handle) => { + // Successfully spawned with state root task support + (handle, StateRootStrategy::StateRootTask) + } + Err((error, txs, env, provider_builder)) => { + // Failed to spawn proof workers, fallback to parallel state root + error!( + target: "engine::tree", + block=?block_num_hash, + ?error, + "Failed to spawn proof workers, falling back to parallel state root" + ); + ( + self.payload_processor.spawn_cache_exclusive( + env, + txs, + provider_builder, + ), + StateRootStrategy::Parallel, + ) + } + } // if prefix sets are not empty, we spawn a task that exclusively handles cache // prewarming for transaction execution } else {
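Besides the proof-worker fallback, the validator now verifies the computed state root through the consensus object (`validate_state_root`) instead of a direct equality check against the header, which lets a chain-specific consensus implementation hook its own rule in. A simplified sketch of that indirection; the trait and signatures below are assumptions for illustration, not reth's actual `HeaderValidator` definition.

```rust
// Simplified stand-ins; the real trait lives in reth-consensus and differs.
type B256 = [u8; 32];

struct Header {
    state_root: B256,
}

#[derive(Debug)]
struct ConsensusError(String);

trait StateRootValidator {
    /// Default rule: the computed root must equal the header's root.
    fn validate_state_root(&self, header: &Header, computed: B256) -> Result<(), ConsensusError> {
        if header.state_root == computed {
            Ok(())
        } else {
            Err(ConsensusError("state root mismatch".into()))
        }
    }
}

/// A chain-specific consensus impl could override the default, for example to
/// compare against a different commitment scheme.
struct EthereumConsensus;
impl StateRootValidator for EthereumConsensus {}

fn main() {
    let consensus = EthereumConsensus;
    let header = Header { state_root: [0u8; 32] };
    assert!(consensus.validate_state_root(&header, [0u8; 32]).is_ok());
    assert!(consensus.validate_state_root(&header, [1u8; 32]).is_err());
}
```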
diff --git reth/crates/engine/tree/src/tree/precompile_cache.rs scroll-reth/crates/engine/tree/src/tree/precompile_cache.rs index c88cb4bc7209a679d6df352f35698a6287a69e2d..e58c491188793599783d7cc6c8656702dbaebbfd 100644 --- reth/crates/engine/tree/src/tree/precompile_cache.rs +++ scroll-reth/crates/engine/tree/src/tree/precompile_cache.rs @@ -273,9 +273,9 @@ }   #[test] fn test_precompile_cache_basic() { - let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult { + let dyn_precompile: DynPrecompile = (|_input: PrecompileInput<'_>| -> PrecompileResult { Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false }) - } + }) .into();   let cache =
diff --git reth/crates/engine/tree/src/tree/tests.rs scroll-reth/crates/engine/tree/src/tree/tests.rs index b2774b8b17e39702c308fce064b23c837c57d061..17b5950e0778e888ded8737cf1b31983794141a3 100644 --- reth/crates/engine/tree/src/tree/tests.rs +++ scroll-reth/crates/engine/tree/src/tree/tests.rs @@ -56,6 +56,7 @@ let block = reth_ethereum_primitives::Block::try_from(payload.payload).map_err(|e| { reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()) })?; let sealed = block.seal_slow(); + sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into())) } } @@ -1705,3 +1706,305 @@ sidecar: ExecutionPayloadSidecar::none(), } } } + +/// Test suite for the refactored `on_forkchoice_updated` helper methods +#[cfg(test)] +mod forkchoice_updated_tests { + use super::*; + use alloy_primitives::Address; + + /// Test that validates the forkchoice state pre-validation logic + #[tokio::test] + async fn test_validate_forkchoice_state() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Test 1: Zero head block hash should return early with invalid state + let zero_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(zero_state).unwrap(); + assert!(result.is_some(), "Zero head block hash should return early"); + let outcome = result.unwrap(); + // For invalid state, we expect an error response + assert!(matches!(outcome, OnForkChoiceUpdated { .. })); + + // Test 2: Valid state with backfill active should return syncing + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_some(), "Backfill active should return early"); + let outcome = result.unwrap(); + // We need to await the outcome to check the payload status + let fcu_result = outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + + // Test 3: Valid state with idle backfill should continue processing + test_harness.tree.backfill_sync_state = BackfillSyncState::Idle; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_none(), "Valid state should continue processing"); + } + + /// Test that verifies canonical head handling + #[tokio::test] + async fn test_handle_canonical_head() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test 1: Head is already canonical, no payload attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should return outcome for canonical head"); + let outcome = result.unwrap(); + let 
fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Head is not canonical - should return None to continue processing + let non_canonical_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(non_canonical_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Non-canonical head should return None"); + } + + /// Test that verifies chain update application + #[tokio::test] + async fn test_apply_chain_update() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create a chain of blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let new_head = blocks[2].recovered_block().hash(); + + // Test 1: Apply chain update to a new head + let state = ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should apply chain update for new head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Try to apply chain update to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(missing_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Missing block should return None"); + } + + /// Test that verifies missing block handling + #[tokio::test] + async fn test_handle_missing_block() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + let state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.handle_missing_block(state).unwrap(); + + // Should return syncing status with download event + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some()); + + if let Some(TreeEvent::Download(download_request)) = result.event { + match download_request { + DownloadRequest::BlockSet(block_set) => { + assert_eq!(block_set.len(), 1); + } + _ => panic!("Expected single block download request"), + } + } + } + + /// Test the complete `on_forkchoice_updated` flow with all helper methods + #[tokio::test] + async fn test_on_forkchoice_updated_integration() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test Case 1: FCU to existing canonical head + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: canonical_head, + finalized_block_hash: canonical_head, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, 
EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test Case 2: FCU to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(missing_state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some(), "Should trigger download event for missing block"); + + // Test Case 3: FCU during backfill sync + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); + } + + /// Test metrics recording in forkchoice updated + #[tokio::test] + async fn test_record_forkchoice_metrics() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + // Get initial metrics state by checking if metrics are recorded + // We can't directly get counter values, but we can verify the methods are called + + // Test without attributes + let attrs_none = None; + test_harness.tree.record_forkchoice_metrics(&attrs_none); + + // Test with attributes + let attrs_some = Some(alloy_rpc_types_engine::PayloadAttributes { + timestamp: 1000, + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }); + test_harness.tree.record_forkchoice_metrics(&attrs_some); + + // We can't directly verify counter values since they're private metrics + // But we can verify the methods don't panic and execute successfully + } + + /// Test edge case: FCU with invalid ancestor + #[tokio::test] + async fn test_fcu_with_invalid_ancestor() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Mark a block as invalid + let invalid_block_hash = B256::random(); + test_harness.tree.state.invalid_headers.insert(BlockWithParent { + block: NumHash::new(1, invalid_block_hash), + parent: B256::ZERO, + }); + + // Test FCU that points to a descendant of the invalid block + // This is a bit tricky to test directly, but we can verify the check_invalid_ancestor + // method + let result = test_harness.tree.check_invalid_ancestor(invalid_block_hash).unwrap(); + assert!(result.is_some(), "Should detect invalid ancestor"); + } + + /// Test `OpStack` specific behavior with canonical head + #[tokio::test] + async fn test_opstack_canonical_head_behavior() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Set engine kind to OpStack + test_harness.tree.engine_kind = EngineApiKind::OpStack; + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // For OpStack, even if head is already canonical, we should still process payload + // attributes + let state = ForkchoiceState { + 
head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "OpStack should handle canonical head"); + } +}
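The test suite above pins down the contract of the four helper methods that the refactor splits `on_forkchoice_updated` into. As orientation only, the control flow implied by the tests looks roughly like the sketch below; the method names come from the tests, while the exact signatures, error type, and `TreeOutcome::new` constructor are assumptions rather than the fork's actual code.

```rust
// Rough sketch of the dispatch order implied by the tests; not the real implementation.
fn on_forkchoice_updated(
    &mut self,
    state: ForkchoiceState,
    attrs: Option<PayloadAttributes>,
    version: EngineApiMessageVersion,
) -> Result<TreeOutcome<OnForkChoiceUpdated>, Error> {
    self.record_forkchoice_metrics(&attrs);

    // Zero head hash, or backfill sync active: answer immediately (invalid / SYNCING).
    if let Some(early) = self.validate_forkchoice_state(state)? {
        return Ok(TreeOutcome::new(early));
    }
    // Head is already canonical: VALID (OpStack may still process payload attributes).
    if let Some(outcome) = self.handle_canonical_head(state, &attrs, version)? {
        return Ok(outcome);
    }
    // Head is known but not canonical: make the chain canonical and answer VALID.
    if let Some(outcome) = self.apply_chain_update(state, &attrs, version)? {
        return Ok(outcome);
    }
    // Head is unknown: answer SYNCING and emit a download event for the missing block.
    self.handle_missing_block(state)
}
```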
diff --git reth/crates/era-downloader/src/client.rs scroll-reth/crates/era-downloader/src/client.rs index 298248ff3e9af4e0c0e304f15fb268d731e0ccce..36ed93e1e2fd58e9a6fa3fe6d39e499a0878c9c6 100644 --- reth/crates/era-downloader/src/client.rs +++ scroll-reth/crates/era-downloader/src/client.rs @@ -128,8 +128,6 @@ if let Some(name) = entry.file_name().to_str() && let Some(number) = self.file_name_to_number(name) && (number < index || number >= last) { - eprintln!("Deleting file {}", entry.path().display()); - eprintln!("{number} < {index} || {number} >= {last}"); reth_fs_util::remove_file(entry.path())?; } }
diff --git reth/crates/era-utils/src/history.rs scroll-reth/crates/era-utils/src/history.rs index 12bafed611371fc09a3546323707d09cf627b583..b1c3cd309c021b47ab72c919c363299a8faf1574 100644 --- reth/crates/era-utils/src/history.rs +++ scroll-reth/crates/era-utils/src/history.rs @@ -286,12 +286,12 @@ <P as NodePrimitivesProvider>::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>, { let mut last_header_number = match block_numbers.start_bound() { Bound::Included(&number) => number, - Bound::Excluded(&number) => number.saturating_sub(1), + Bound::Excluded(&number) => number.saturating_add(1), Bound::Unbounded => 0, }; let target = match block_numbers.end_bound() { Bound::Included(&number) => Some(number), - Bound::Excluded(&number) => Some(number.saturating_add(1)), + Bound::Excluded(&number) => Some(number.saturating_sub(1)), Bound::Unbounded => None, };
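The swapped arithmetic aligns the bound handling with standard `RangeBounds` semantics (an excluded start begins one block later, an excluded end stops one block earlier), assuming `last_header_number` and `target` are meant as inclusive block numbers. A small self-contained illustration of that conversion:

```rust
use std::ops::{Bound, RangeBounds};

/// Converts range bounds into (first block, optional last block), using the same
/// saturating arithmetic as the updated code above.
fn to_inclusive(start: Bound<&u64>, end: Bound<&u64>) -> (u64, Option<u64>) {
    let first = match start {
        Bound::Included(&n) => n,
        Bound::Excluded(&n) => n.saturating_add(1),
        Bound::Unbounded => 0,
    };
    let last = match end {
        Bound::Included(&n) => Some(n),
        Bound::Excluded(&n) => Some(n.saturating_sub(1)),
        Bound::Unbounded => None,
    };
    (first, last)
}

fn main() {
    let half_open = 10u64..20; // covers blocks 10..=19
    assert_eq!(to_inclusive(half_open.start_bound(), half_open.end_bound()), (10, Some(19)));

    let inclusive = 10u64..=20; // covers blocks 10..=20
    assert_eq!(to_inclusive(inclusive.start_bound(), inclusive.end_bound()), (10, Some(20)));
}
```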
diff --git reth/crates/evm/evm/Cargo.toml scroll-reth/crates/evm/evm/Cargo.toml index 4bc8ef06dbbd6ab46e72cf1c74db0c9c8292ae52..aa6472a43e67de63f391a9fabcca6f55b2f4a980 100644 --- reth/crates/evm/evm/Cargo.toml +++ scroll-reth/crates/evm/evm/Cargo.toml @@ -28,6 +28,9 @@ alloy-eips.workspace = true alloy-evm.workspace = true alloy-consensus.workspace = true   +# scroll +scroll-alloy-evm = { workspace = true, optional = true, default-features = false } + auto_impl.workspace = true derive_more.workspace = true futures-util.workspace = true @@ -55,6 +58,7 @@ "derive_more/std", "reth-storage-api/std", "reth-trie-common/std", "reth-ethereum-primitives/std", + "scroll-alloy-evm?/std", ] metrics = ["std", "dep:metrics", "dep:reth-metrics"] test-utils = [ @@ -63,3 +67,4 @@ "reth-trie-common/test-utils", "reth-ethereum-primitives/test-utils", ] op = ["alloy-evm/op", "reth-primitives-traits/op"] +scroll-alloy-traits = ["dep:scroll-alloy-evm", "reth-primitives-traits/scroll-alloy-traits"]
diff --git reth/crates/evm/evm/src/aliases.rs scroll-reth/crates/evm/evm/src/aliases.rs index 6bb1ab1c35a6cae9fca57a3daa31dfb072a96e10..7758f0aea1731b188223018aa32eae31aae29aec 100644 --- reth/crates/evm/evm/src/aliases.rs +++ scroll-reth/crates/evm/evm/src/aliases.rs @@ -11,6 +11,9 @@ /// Helper to access [`EvmFactory::Spec`] for a given [`ConfigureEvm`]. pub type SpecFor<Evm> = <EvmFactoryFor<Evm> as EvmFactory>::Spec;   +/// Helper to access [`EvmFactory::BlockEnv`] for a given [`ConfigureEvm`]. +pub type BlockEnvFor<Evm> = <EvmFactoryFor<Evm> as EvmFactory>::BlockEnv; + /// Helper to access [`EvmFactory::Evm`] for a given [`ConfigureEvm`]. pub type EvmFor<Evm, DB, I = NoOpInspector> = <EvmFactoryFor<Evm> as EvmFactory>::Evm<DB, I>;   @@ -31,7 +34,7 @@ pub type ExecutionCtxFor<'a, Evm> = <<Evm as ConfigureEvm>::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>;   /// Type alias for [`EvmEnv`] for a given [`ConfigureEvm`]. -pub type EvmEnvFor<Evm> = EvmEnv<SpecFor<Evm>>; +pub type EvmEnvFor<Evm> = EvmEnv<SpecFor<Evm>, BlockEnvFor<Evm>>;   /// Helper trait to bound [`Inspector`] for a [`ConfigureEvm`]. pub trait InspectorFor<Evm: ConfigureEvm, DB: Database>: Inspector<EvmContextFor<Evm, DB>> {}
diff --git reth/crates/evm/evm/src/either.rs scroll-reth/crates/evm/evm/src/either.rs index 904ce7ebbd6dfab016a329df9e2616fc63b12bfc..d497e142d7a50cc29d84b0a04f368f35a7f6c442 100644 --- reth/crates/evm/evm/src/either.rs +++ scroll-reth/crates/evm/evm/src/either.rs @@ -58,7 +58,7 @@ block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>, state: F, ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error> where - F: FnMut(&revm::database::State<DB>), + F: FnMut(&mut revm::database::State<DB>), { match self { Self::Left(a) => a.execute_with_state_closure(block, state),
diff --git reth/crates/evm/evm/src/engine.rs scroll-reth/crates/evm/evm/src/engine.rs index 5c721d811bc1fb9c3aa93c0cae23608965b6c5ab..5b46a0861706eba4b894bf9ee2d18b7186cf48b7 100644 --- reth/crates/evm/evm/src/engine.rs +++ scroll-reth/crates/evm/evm/src/engine.rs @@ -2,7 +2,7 @@ use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor};   /// [`ConfigureEvm`] extension providing methods for executing payloads. pub trait ConfigureEngineEvm<ExecutionData>: ConfigureEvm { - /// Returns an [`EvmEnvFor`] for the given payload. + /// Returns an [`crate::EvmEnv`] for the given payload. fn evm_env_for_payload(&self, payload: &ExecutionData) -> Result<EvmEnvFor<Self>, Self::Error>;   /// Returns an [`ExecutionCtxFor`] for the given payload.
diff --git reth/crates/evm/evm/src/execute.rs scroll-reth/crates/evm/evm/src/execute.rs index e318b5899397a350dc9fbaddc11a8a449d1f7cb5..6ea7b928514835603d2cf0c962926de70d64ad18 100644 --- reth/crates/evm/evm/src/execute.rs +++ scroll-reth/crates/evm/evm/src/execute.rs @@ -99,11 +99,11 @@ block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>, mut f: F, ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error> where - F: FnMut(&State<DB>), + F: FnMut(&mut State<DB>), { let result = self.execute_one(block)?; let mut state = self.into_state(); - f(&state); + f(&mut state); Ok(BlockExecutionOutput { state: state.take_bundle(), result }) }   @@ -115,11 +115,11 @@ block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>, mut f: F, ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error> where - F: FnMut(&State<DB>), + F: FnMut(&mut State<DB>), { let result = self.execute_one(block); let mut state = self.into_state(); - f(&state); + f(&mut state);   Ok(BlockExecutionOutput { state: state.take_bundle(), result: result? }) } @@ -149,6 +149,11 @@ fn size_hint(&self) -> usize; }   /// Helper type for the output of executing a block. +/// +/// Deprecated: this type is unused within reth and will be removed in the next +/// major release. Use `reth_execution_types::BlockExecutionResult` or +/// `reth_execution_types::BlockExecutionOutput`. +#[deprecated(note = "Use reth_execution_types::BlockExecutionResult or BlockExecutionOutput")] #[derive(Debug, Clone)] pub struct ExecuteOutput<R> { /// Receipts obtained after executing a block. @@ -198,7 +203,8 @@ pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { /// Configuration of EVM used when executing the block. /// /// Contains context relevant to EVM such as [`revm::context::BlockEnv`]. - pub evm_env: EvmEnv<<F::EvmFactory as EvmFactory>::Spec>, + pub evm_env: + EvmEnv<<F::EvmFactory as EvmFactory>::Spec, <F::EvmFactory as EvmFactory>::BlockEnv>, /// [`BlockExecutorFactory::ExecutionCtx`] used to execute the block. pub execution_ctx: F::ExecutionCtx<'a>, /// Parent block header. @@ -220,7 +226,10 @@ impl<'a, 'b, F: BlockExecutorFactory, H> BlockAssemblerInput<'a, 'b, F, H> { /// Creates a new [`BlockAssemblerInput`]. 
#[expect(clippy::too_many_arguments)] pub fn new( - evm_env: EvmEnv<<F::EvmFactory as EvmFactory>::Spec>, + evm_env: EvmEnv< + <F::EvmFactory as EvmFactory>::Spec, + <F::EvmFactory as EvmFactory>::BlockEnv, + >, execution_ctx: F::ExecutionCtx<'a>, parent: &'a SealedHeader<H>, transactions: Vec<F::Transaction>, @@ -460,6 +469,7 @@ Executor: BlockExecutor< Evm: Evm< Spec = <F::EvmFactory as EvmFactory>::Spec, HaltReason = <F::EvmFactory as EvmFactory>::HaltReason, + BlockEnv = <F::EvmFactory as EvmFactory>::BlockEnv, DB = &'a mut State<DB>, >, Transaction = N::SignedTx, @@ -654,7 +664,6 @@ #[cfg(test)] mod tests { use super::*; use crate::Address; - use alloy_consensus::constants::KECCAK_EMPTY; use alloy_evm::block::state_changes::balance_increment_state; use alloy_primitives::{address, map::HashMap, U256}; use core::marker::PhantomData; @@ -726,12 +735,8 @@ ) -> State<CacheDB<EmptyDB>> { let db = CacheDB::<EmptyDB>::default(); let mut state = State::builder().with_database(db).with_bundle_update().build();   - let account_info = AccountInfo { - balance: U256::from(balance), - nonce, - code_hash: KECCAK_EMPTY, - code: None, - }; + let account_info = + AccountInfo { balance: U256::from(balance), nonce, ..Default::default() }; state.insert_account(addr, account_info); state } @@ -767,8 +772,7 @@ let addr2 = address!("0x2000000000000000000000000000000000000000");   let mut state = setup_state_with_account(addr1, 100, 1);   - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { balance: U256::from(200), nonce: 1, ..Default::default() }; state.insert_account(addr2, account2);   let mut increments = HashMap::default(); @@ -789,8 +793,7 @@ let addr2 = address!("0x2000000000000000000000000000000000000000");   let mut state = setup_state_with_account(addr1, 100, 1);   - let account2 = - AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + let account2 = AccountInfo { balance: U256::from(200), nonce: 1, ..Default::default() }; state.insert_account(addr2, account2);   let mut increments = HashMap::default();
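Because the closure now receives `&mut State<DB>` instead of `&State<DB>`, callers can adjust the post-execution state before the bundle is taken. A hedged usage fragment; the `executor` and `block` bindings are assumed to exist in surrounding code:

```rust
// Fragment: `executor` implements `Executor`, `block` is a `RecoveredBlock`; both
// come from the surrounding context and are not defined here.
let output = executor.execute_with_state_closure(&block, |state: &mut revm::database::State<_>| {
    // With mutable access, the hook can touch the cached state (e.g. pre-load an
    // account or record statistics) before `take_bundle()` runs.
    let _ = state;
})?;
```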
diff --git reth/crates/evm/evm/src/lib.rs scroll-reth/crates/evm/evm/src/lib.rs index a2a30b9e0abc7c2173735a3b4ccef7b90f86eb36..00cd3a0f1f201b0e546222714e95dfda09dc5239 100644 --- reth/crates/evm/evm/src/lib.rs +++ scroll-reth/crates/evm/evm/src/lib.rs @@ -574,3 +574,22 @@ fn set_access_list(&mut self, access_list: AccessList) { self.base.set_access_list(access_list); } } + +#[cfg(feature = "scroll-alloy-traits")] +impl<T: TransactionEnv> TransactionEnv for scroll_alloy_evm::ScrollTransactionIntoTxEnv<T> { + fn set_gas_limit(&mut self, gas_limit: u64) { + self.base.set_gas_limit(gas_limit); + } + + fn nonce(&self) -> u64 { + TransactionEnv::nonce(&self.base) + } + + fn set_nonce(&mut self, nonce: u64) { + self.base.set_nonce(nonce); + } + + fn set_access_list(&mut self, access_list: AccessList) { + self.base.set_access_list(access_list); + } +}
diff --git reth/crates/exex/exex/src/wal/storage.rs scroll-reth/crates/exex/exex/src/wal/storage.rs index 122edc632e72ee1d8d6beba7113d75a14749642a..466e176232199fddd8f3b7f3fc050ff6fcd67224 100644 --- reth/crates/exex/exex/src/wal/storage.rs +++ scroll-reth/crates/exex/exex/src/wal/storage.rs @@ -186,6 +186,7 @@ // wal with 1 block and tx // <https://github.com/paradigmxyz/reth/issues/15012> #[test] + #[ignore] fn decode_notification_wal() { let wal = include_bytes!("../../test-data/28.wal"); let notification: reth_exex_types::serde_bincode_compat::ExExNotification<
diff --git reth/crates/exex/types/src/notification.rs scroll-reth/crates/exex/types/src/notification.rs index cf0d7580556a8ec2d9f947f4e35c0731b2eaafc3..8a62fa9230e86d0df64bf133510a6260fcbb87e3 100644 --- reth/crates/exex/types/src/notification.rs +++ scroll-reth/crates/exex/types/src/notification.rs @@ -7,6 +7,7 @@ /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[allow(clippy::large_enum_variant)] pub enum ExExNotification<N: NodePrimitives = reth_chain_state::EthPrimitives> { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted {
diff --git reth/crates/net/discv4/src/lib.rs scroll-reth/crates/net/discv4/src/lib.rs index 3686d7bf690debd3db46df576ca792c2ce297d94..83106cbbe6e0bc55418032afdce26779d67d649e 100644 --- reth/crates/net/discv4/src/lib.rs +++ scroll-reth/crates/net/discv4/src/lib.rs @@ -2402,7 +2402,7 @@ EnrForkId(NodeRecord, ForkId), /// Node that was removed from the table Removed(PeerId), /// A series of updates - Batch(Vec<DiscoveryUpdate>), + Batch(Vec<Self>), }   #[cfg(test)]
diff --git reth/crates/net/eth-wire-types/src/capability.rs scroll-reth/crates/net/eth-wire-types/src/capability.rs index 3f39bed60647df467d8d7cccffb927d429159fb9..2c8119027d1b9bf75eaefe819a1c899792c3e77b 100644 --- reth/crates/net/eth-wire-types/src/capability.rs +++ scroll-reth/crates/net/eth-wire-types/src/capability.rs @@ -160,6 +160,7 @@ impl Capabilities { /// Returns all capabilities. #[inline] + #[allow(clippy::missing_const_for_fn)] pub fn capabilities(&self) -> &[Capability] { &self.inner }
diff --git reth/crates/net/network-api/Cargo.toml scroll-reth/crates/net/network-api/Cargo.toml index b0ebed8bcfba346b4cc2cb807f35b3cfb881c064..2bb54aa0d6b1886b3c167f916637bba57433ec4a 100644 --- reth/crates/net/network-api/Cargo.toml +++ scroll-reth/crates/net/network-api/Cargo.toml @@ -19,6 +19,7 @@ reth-network-p2p.workspace = true reth-eth-wire-types.workspace = true reth-tokio-util.workspace = true reth-ethereum-forks.workspace = true +reth-primitives-traits.workspace = true   # ethereum alloy-consensus.workspace = true @@ -48,4 +49,5 @@ "enr/serde", "reth-ethereum-forks/serde", "alloy-consensus/serde", "alloy-rpc-types-eth/serde", + "reth-primitives-traits/serde", ]
diff --git reth/crates/net/network-api/src/block.rs scroll-reth/crates/net/network-api/src/block.rs new file mode 100644 index 0000000000000000000000000000000000000000..994c90a09a847e0d6f879408bfb972b7afc0a965 --- /dev/null +++ scroll-reth/crates/net/network-api/src/block.rs @@ -0,0 +1,26 @@ +use super::*; +use alloy_primitives::B256; +use reth_tokio_util::EventStream; +use tokio::sync::oneshot; + +/// The message that is broadcast to subscribers of the block import channel. +#[derive(Debug, Clone)] +pub struct NewBlockWithPeer<B> { + /// The peer that sent the block. + pub peer_id: PeerId, + /// The block that was received. + pub block: B, +} + +/// Provides a listener for new blocks on the eth wire protocol. +pub trait EthWireProvider<N: NetworkPrimitives> { + /// Create a new eth wire block listener. + fn eth_wire_block_listener( + &self, + ) -> impl Future< + Output = Result<EventStream<NewBlockWithPeer<N::Block>>, oneshot::error::RecvError>, + > + Send; + + /// Announce a new block to the network over the eth wire protocol. + fn eth_wire_announce_block(&self, block: N::NewBlockPayload, hash: B256); +}
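For context, a consumer of this bridge would subscribe and drain the stream roughly as follows; the import paths are assumptions, and the body only logs the announcing peer before handing the block off.

```rust
use futures::StreamExt;
use reth_network_api::block::{EthWireProvider, NewBlockWithPeer};

/// Drains eth-wire `NewBlock` announcements from a network handle.
async fn watch_eth_wire_blocks<N, P>(
    network: P,
) -> Result<(), tokio::sync::oneshot::error::RecvError>
where
    N: reth_eth_wire_types::NetworkPrimitives,
    P: EthWireProvider<N>,
{
    let mut listener = network.eth_wire_block_listener().await?;
    while let Some(NewBlockWithPeer { peer_id, block }) = listener.next().await {
        println!("eth-wire NewBlock from peer {peer_id}");
        // Hand `block` to whatever drives import (block import task, engine, ...).
        drop(block);
    }
    Ok(())
}
```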
diff --git reth/crates/net/network-api/src/lib.rs scroll-reth/crates/net/network-api/src/lib.rs index 754463cb34fb9d2952d95ebc26e2413ad9d45213..e003f73a25c329e53c42dc69ab5625d91856bec9 100644 --- reth/crates/net/network-api/src/lib.rs +++ scroll-reth/crates/net/network-api/src/lib.rs @@ -20,7 +20,11 @@ pub mod events; /// Implementation of network traits for that does nothing. pub mod noop;   +/// Type used for the eth wire bridge. +pub mod block; + pub mod test_utils; +use block::EthWireProvider; use test_utils::PeersHandleProvider;   pub use alloy_rpc_types_admin::EthProtocolInfo; @@ -54,6 +58,7 @@ + NetworkInfo + NetworkEventListenerProvider + Peers + PeersHandleProvider + + EthWireProvider<Self::Primitives> + Clone + Unpin + 'static @@ -68,6 +73,7 @@ + NetworkInfo + NetworkEventListenerProvider + Peers + PeersHandleProvider + + EthWireProvider<Self::Primitives> + Clone + Unpin + 'static
diff --git reth/crates/net/network-api/src/noop.rs scroll-reth/crates/net/network-api/src/noop.rs index 2aaa0093568d06415f8b5b623af2595e3d492dd1..a40f21270a922f52dde89430f0d2c693e3b719e4 100644 --- reth/crates/net/network-api/src/noop.rs +++ scroll-reth/crates/net/network-api/src/noop.rs @@ -7,6 +7,7 @@ use core::{fmt, marker::PhantomData}; use std::net::{IpAddr, SocketAddr};   use crate::{ + block::{EthWireProvider, NewBlockWithPeer}, events::{NetworkPeersEvents, PeerEventStream}, test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, @@ -22,7 +23,7 @@ use reth_network_p2p::{sync::NetworkSyncUpdater, NoopFullBlockClient}; use reth_network_peers::NodeRecord; use reth_network_types::{PeerKind, Reputation, ReputationChangeKind}; use reth_tokio_util::{EventSender, EventStream}; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::UnboundedReceiverStream;   /// A type that implements all network trait that does nothing. @@ -205,6 +206,22 @@ fn discovery_listener(&self) -> UnboundedReceiverStream<DiscoveryEvent> { let (_, rx) = mpsc::unbounded_channel(); UnboundedReceiverStream::new(rx) + } +} + +impl<N: NetworkPrimitives> EthWireProvider<N> for NoopNetwork<N> { + async fn eth_wire_block_listener( + &self, + ) -> Result<EventStream<NewBlockWithPeer<N::Block>>, RecvError> { + unreachable!() + } + + fn eth_wire_announce_block( + &self, + _block: <N as NetworkPrimitives>::NewBlockPayload, + _hash: alloy_primitives::B256, + ) { + unreachable!() } }
diff --git reth/crates/net/network/Cargo.toml scroll-reth/crates/net/network/Cargo.toml index 54902ef478837fe58e58968e95ab365076f064ca..caee58ef6eb76a69b21347b6639a179e9019b377 100644 --- reth/crates/net/network/Cargo.toml +++ scroll-reth/crates/net/network/Cargo.toml @@ -60,13 +60,13 @@ reth-metrics = { workspace = true, features = ["common"] } metrics.workspace = true   # misc +async-trait.workspace = true auto_impl.workspace = true aquamarine.workspace = true tracing.workspace = true rustc-hash.workspace = true thiserror.workspace = true parking_lot.workspace = true -rand.workspace = true rand_08.workspace = true secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } derive_more.workspace = true @@ -94,6 +94,7 @@ # misc url.workspace = true secp256k1 = { workspace = true, features = ["rand"] } +rand.workspace = true   ## Benchmarks criterion = { workspace = true, features = ["async_tokio", "html_reports"] } @@ -112,7 +113,6 @@ "alloy-eips/serde", "alloy-primitives/serde", "discv5/serde", "parking_lot/serde", - "rand/serde", "smallvec/serde", "url/serde", "reth-primitives-traits/serde", @@ -122,6 +122,7 @@ "reth-transaction-pool/serde", "reth-ethereum-primitives/serde", "reth-network-api/serde", "rand_08/serde", + "rand/serde", "reth-storage-api/serde", ] test-utils = [
diff --git reth/crates/net/network/src/budget.rs scroll-reth/crates/net/network/src/budget.rs index 824148387b40227d9e09256124762f9284bde1dd..f1d9ca874694a1bb9815c3b1547e6669253e659f 100644 --- reth/crates/net/network/src/budget.rs +++ scroll-reth/crates/net/network/src/budget.rs @@ -35,13 +35,6 @@ // // Default is 40 pending pool imports. pub const DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS: u32 = 4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM;   -/// Default budget to try and stream hashes of successfully imported transactions from the pool. -/// -/// Default is naturally same as the number of transactions to attempt importing, -/// [`DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS`], so 40 pool imports. -pub const DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS: u32 = - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS; - /// Polls the given stream. Breaks with `true` if there maybe is more work. #[macro_export] macro_rules! poll_nested_stream_with_budget {
diff --git reth/crates/net/network/src/builder.rs scroll-reth/crates/net/network/src/builder.rs index 3f36b1bdc80562e3c6eb5a0967470dc5461d956e..c083ccb34212727331743e65e07d2f820371f522 100644 --- reth/crates/net/network/src/builder.rs +++ scroll-reth/crates/net/network/src/builder.rs @@ -9,11 +9,13 @@ config::{StrictEthAnnouncementFilter, TransactionPropagationKind}, policy::NetworkPolicies, TransactionPropagationPolicy, TransactionsManager, TransactionsManagerConfig, }, + transform::header::HeaderResponseTransform, NetworkHandle, NetworkManager, }; use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandleProvider; use reth_transaction_pool::TransactionPool; +use std::sync::Arc; use tokio::sync::mpsc;   /// We set the max channel capacity of the `EthRequestHandler` to 256 @@ -63,12 +65,13 @@ /// Creates a new [`EthRequestHandler`] and wires it to the network. pub fn request_handler<Client>( self, client: Client, + request_header_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, ) -> NetworkBuilder<Tx, EthRequestHandler<Client, N>, N> { let Self { mut network, transactions, .. } = self; let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); network.set_eth_request_handler(tx); let peers = network.handle().peers_handle().clone(); - let request_handler = EthRequestHandler::new(client, peers, rx); + let request_handler = EthRequestHandler::new(client, peers, rx, request_header_transform); NetworkBuilder { network, request_handler, transactions } }
diff --git reth/crates/net/network/src/config.rs scroll-reth/crates/net/network/src/config.rs index 8e8d11fe69de5594d8979b1dab5d70eb0a19e948..04ac4cf27141a3848ae6296ec85959667c4400c4 100644 --- reth/crates/net/network/src/config.rs +++ scroll-reth/crates/net/network/src/config.rs @@ -4,6 +4,7 @@ use crate::{ error::NetworkError, import::{BlockImport, ProofOfStakeBlockImport}, transactions::TransactionsManagerConfig, + transform::header::HeaderTransform, NetworkHandle, NetworkManager, }; use alloy_primitives::B256; @@ -97,6 +98,8 @@ pub handshake: Arc<dyn EthRlpxHandshake>, /// List of block hashes to check for required blocks. /// If non-empty, peers that don't have these blocks will be filtered out. pub required_block_hashes: Vec<B256>, + /// A transformation hook applied to the downloaded headers. + pub header_transform: Arc<dyn HeaderTransform<N::BlockHeader>>, }   // === impl NetworkConfig === @@ -173,7 +176,7 @@ pub async fn start_network(self) -> Result<NetworkHandle<N>, NetworkError> { let client = self.client.clone(); let (handle, network, _txpool, eth) = NetworkManager::builder::<C>(self) .await? - .request_handler::<C>(client) + .request_handler::<C>(client, None) .split_with_handle();   tokio::task::spawn(network); @@ -226,6 +229,10 @@ /// <https://github.com/ethereum/devp2p/blob/master/rlpx.md#initial-handshake>. handshake: Arc<dyn EthRlpxHandshake>, /// List of block hashes to check for required blocks. required_block_hashes: Vec<B256>, + /// Optional network id + network_id: Option<u64>, + /// The header transform type. + header_transform: Option<Arc<dyn HeaderTransform<N::BlockHeader>>>, }   impl NetworkConfigBuilder<EthNetworkPrimitives> { @@ -267,6 +274,8 @@ transactions_manager_config: Default::default(), nat: None, handshake: Arc::new(EthHandshake::default()), required_block_hashes: Vec::new(), + network_id: None, + header_transform: None, } }   @@ -587,6 +596,21 @@ self.handshake = handshake; self }   + /// Set the optional network id. + pub const fn network_id(mut self, network_id: Option<u64>) -> Self { + self.network_id = network_id; + self + } + + /// Sets the header transform type. + pub fn header_transform( + mut self, + header_transform: Arc<dyn HeaderTransform<N::BlockHeader>>, + ) -> Self { + self.header_transform = Some(header_transform); + self + } + /// Consumes the type and creates the actual [`NetworkConfig`] /// for the given client type that can interact with the chain. /// @@ -620,6 +644,8 @@ transactions_manager_config, nat, handshake, required_block_hashes, + network_id, + header_transform, } = self;   let head = head.unwrap_or_else(|| Head { @@ -646,7 +672,11 @@ hello_message.unwrap_or_else(|| HelloMessage::builder(peer_id).build()); hello_message.port = listener_addr.port();   // set the status - let status = UnifiedStatus::spec_builder(&chain_spec, &head); + let mut status = UnifiedStatus::spec_builder(&chain_spec, &head); + + if let Some(id) = network_id { + status.chain = id.into(); + }   // set a fork filter based on the chain spec and head let fork_filter = chain_spec.fork_filter(head); @@ -687,6 +717,7 @@ transactions_manager_config, nat, handshake, required_block_hashes, + header_transform: header_transform.unwrap_or_else(|| Arc::new(())), } } }
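For illustration, the two new builder hooks might be wired in as below; `ScrollHeaderTransform` is a hypothetical `HeaderTransform` implementation, and 534352 is the Scroll mainnet chain id.

```rust
use std::sync::Arc;

// Fragment: `builder` is an existing `NetworkConfigBuilder` from surrounding setup code.
let builder = builder
    // Advertise a chain id in the eth status handshake that differs from the chain spec.
    .network_id(Some(534352))
    // Map headers downloaded from peers before they are handed back to requesters.
    .header_transform(Arc::new(ScrollHeaderTransform::default()));
```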
diff --git reth/crates/net/network/src/eth_requests.rs scroll-reth/crates/net/network/src/eth_requests.rs index 492bf8bd55e4354632ef19d4e3a76b2e74b6be8f..b0911a2ab472369e03f4075b9b6d8bab5d8b2c08 100644 --- reth/crates/net/network/src/eth_requests.rs +++ scroll-reth/crates/net/network/src/eth_requests.rs @@ -2,12 +2,12 @@ //! Blocks/Headers management for the p2p network.   use crate::{ budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, - metrics::EthRequestHandlerMetrics, + metrics::EthRequestHandlerMetrics, transform::header::HeaderResponseTransform, }; use alloy_consensus::{BlockHeader, ReceiptWithBloom}; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; -use futures::StreamExt; +use futures::{future::join_all, StreamExt}; use reth_eth_wire::{ BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, Receipts69, @@ -20,6 +20,7 @@ use reth_storage_api::{BlockReader, HeaderProvider}; use std::{ future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, time::Duration, }; @@ -54,13 +55,15 @@ #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] pub struct EthRequestHandler<C, N: NetworkPrimitives = EthNetworkPrimitives> { /// The client type that can interact with the chain. - client: C, + client: Arc<C>, /// Used for reporting peers. // TODO use to report spammers #[expect(dead_code)] peers: PeersHandle, /// Incoming request from the [`NetworkManager`](crate::NetworkManager). incoming_requests: ReceiverStream<IncomingEthRequest<N>>, + /// The header transform to apply to the headers before sending to peers. + header_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, /// Metrics for the eth request handler. 
metrics: EthRequestHandlerMetrics, } @@ -68,11 +71,17 @@ // === impl EthRequestHandler === impl<C, N: NetworkPrimitives> EthRequestHandler<C, N> { /// Create a new instance - pub fn new(client: C, peers: PeersHandle, incoming: Receiver<IncomingEthRequest<N>>) -> Self { + pub fn new( + client: C, + peers: PeersHandle, + incoming: Receiver<IncomingEthRequest<N>>, + header_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, + ) -> Self { Self { - client, + client: Arc::new(client), peers, incoming_requests: ReceiverStream::new(incoming), + header_transform, metrics: Default::default(), } } @@ -81,10 +90,14 @@ impl<C, N> EthRequestHandler<C, N> where N: NetworkPrimitives, - C: BlockReader, + C: BlockReader<Header = N::BlockHeader> + 'static, { /// Returns the list of requested headers - fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<C::Header> { + async fn get_headers_response( + client: Arc<C>, + header_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, + request: GetBlockHeaders, + ) -> Vec<C::Header> { let GetBlockHeaders { start_block, limit, skip, direction } = request;   let mut headers = Vec::new(); @@ -92,9 +105,7 @@ let mut block: BlockHashOrNumber = match start_block { BlockHashOrNumber::Hash(start) => start.into(), BlockHashOrNumber::Number(num) => { - let Some(hash) = self.client.block_hash(num).unwrap_or_default() else { - return headers - }; + let Some(hash) = client.block_hash(num).unwrap_or_default() else { return headers }; hash.into() } }; @@ -103,7 +114,7 @@ let skip = skip as u64; let mut total_bytes = 0;   for _ in 0..limit { - if let Some(header) = self.client.header_by_hash_or_number(block).unwrap_or_default() { + if let Some(header) = client.header_by_hash_or_number(block).unwrap_or_default() { let number = header.number(); let parent_hash = header.parent_hash();   @@ -144,6 +155,11 @@ break } }   + // TODO: remove this once we deprecated l2geth + if let Some(ref header_transform) = header_transform { + return join_all(headers.into_iter().map(|h| header_transform.map(h))).await; + } + headers }   @@ -152,10 +168,14 @@ &self, _peer_id: PeerId, request: GetBlockHeaders, response: oneshot::Sender<RequestResult<BlockHeaders<C::Header>>>, - ) { + ) -> impl Future<Output = ()> + 'static { self.metrics.eth_headers_requests_received_total.increment(1); - let headers = self.get_headers_response(request); - let _ = response.send(Ok(BlockHeaders(headers))); + let client = self.client.clone(); + let header_transform = self.header_transform.clone(); + async move { + let headers = Self::get_headers_response(client, header_transform, request).await; + let _ = response.send(Ok(BlockHeaders(headers))); + } }   fn on_bodies_request( @@ -254,7 +274,8 @@ where N: NetworkPrimitives, C: BlockReader<Block = N::Block, Receipt = N::Receipt> + HeaderProvider<Header = N::BlockHeader> - + Unpin, + + Unpin + + 'static, { type Output = ();   @@ -271,7 +292,8 @@ this.incoming_requests.poll_next_unpin(cx), |incoming| { match incoming { IncomingEthRequest::GetBlockHeaders { peer_id, request, response } => { - this.on_headers_request(peer_id, request, response) + let future = this.on_headers_request(peer_id, request, response); + tokio::spawn(future); } IncomingEthRequest::GetBlockBodies { peer_id, request, response } => { this.on_bodies_request(peer_id, request, response)
diff --git reth/crates/net/network/src/fetch/mod.rs scroll-reth/crates/net/network/src/fetch/mod.rs index 6c14e99400896b5d817fe37cdcbb69d8ee819da5..256ff8f3d5211698055f15a740e569f3d1db3464 100644 --- reth/crates/net/network/src/fetch/mod.rs +++ scroll-reth/crates/net/network/src/fetch/mod.rs @@ -4,7 +4,7 @@ mod client;   pub use client::FetchClient;   -use crate::{message::BlockRequest, session::BlockRangeInfo}; +use crate::{message::BlockRequest, session::BlockRangeInfo, transform::header::HeaderTransform}; use alloy_primitives::B256; use futures::StreamExt; use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; @@ -55,12 +55,18 @@ /// Receiver for new incoming download requests download_requests_rx: UnboundedReceiverStream<DownloadRequest<N>>, /// Sender for download requests, used to detach a [`FetchClient`] download_requests_tx: UnboundedSender<DownloadRequest<N>>, + /// A transformation hook applied to the downloaded headers. + header_transform: Arc<dyn HeaderTransform<N::BlockHeader>>, }   // === impl StateSyncer ===   impl<N: NetworkPrimitives> StateFetcher<N> { - pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc<AtomicUsize>) -> Self { + pub(crate) fn new( + peers_handle: PeersHandle, + num_active_peers: Arc<AtomicUsize>, + header_transform: Arc<dyn HeaderTransform<N::BlockHeader>>, + ) -> Self { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { inflight_headers_requests: Default::default(), @@ -71,6 +77,7 @@ num_active_peers, queued_requests: Default::default(), download_requests_rx: UnboundedReceiverStream::new(download_requests_rx), download_requests_tx, + header_transform, } }   @@ -272,8 +279,15 @@ let is_likely_bad_response = resp.as_ref().is_some_and(|r| res.is_likely_bad_headers_response(&r.request));   if let Some(resp) = resp { - // delegate the response - let _ = resp.response.send(res.map(|h| (peer_id, h).into())); + let header_transform = self.header_transform.clone(); + tokio::spawn(async move { + let res = match res { + Ok(headers) => Ok(header_transform.map(headers).await), + Err(e) => Err(e), + }; + + let _ = resp.response.send(res.map(|h| (peer_id, h).into())); + }); }   if let Some(peer) = self.peers.get_mut(&peer_id) { @@ -484,8 +498,11 @@ #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = - StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default()); + let mut fetcher = StateFetcher::<EthNetworkPrimitives>::new( + manager.handle(), + Default::default(), + Arc::new(()), + );   poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -506,8 +523,11 @@ #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = - StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default()); + let mut fetcher = StateFetcher::<EthNetworkPrimitives>::new( + manager.handle(), + Default::default(), + Arc::new(()), + ); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -530,8 +550,11 @@ #[tokio::test] async fn test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = - StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default()); + let mut fetcher = StateFetcher::<EthNetworkPrimitives>::new( + manager.handle(), + Default::default(), + Arc::new(()), + ); // Add a few random peers let peer1 = 
B512::random(); let peer2 = B512::random(); @@ -556,8 +579,11 @@ #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = - StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default()); + let mut fetcher = StateFetcher::<EthNetworkPrimitives>::new( + manager.handle(), + Default::default(), + Arc::new(()), + ); let peer_id = B512::random();   assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -587,8 +613,11 @@ #[tokio::test] async fn test_header_response_outcome() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = - StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default()); + let mut fetcher = StateFetcher::<EthNetworkPrimitives>::new( + manager.handle(), + Default::default(), + Arc::new(()), + ); let peer_id = B512::random();   let request_pair = || {
diff --git reth/crates/net/network/src/lib.rs scroll-reth/crates/net/network/src/lib.rs index a84168d3846941dfc0c5d2855ffc1f837da175d0..c164d9cd5455fc3671f8b0766c6d0574746ef10b 100644 --- reth/crates/net/network/src/lib.rs +++ scroll-reth/crates/net/network/src/lib.rs @@ -98,7 +98,7 @@ //! let (handle, network, transactions, request_handler) = NetworkManager::builder(config) //! .await //! .unwrap() //! .transactions(pool, transactions_manager_config) -//! .request_handler(client) +//! .request_handler(client, None) //! .split_with_handle(); //! } //! ``` @@ -130,6 +130,7 @@ pub mod message; pub mod peers; pub mod protocol; pub mod transactions; +pub mod transform;   mod budget; mod builder;
diff --git reth/crates/net/network/src/manager.rs scroll-reth/crates/net/network/src/manager.rs index c0a2934df7582147e6577a80413043f2e2c9dc01..b9e5643e0e53920d48382ec99036c44765b7a528 100644 --- reth/crates/net/network/src/manager.rs +++ scroll-reth/crates/net/network/src/manager.rs @@ -21,7 +21,7 @@ config::NetworkConfig, discovery::Discovery, error::{NetworkError, ServiceKind}, eth_requests::IncomingEthRequest, - import::{BlockImport, BlockImportEvent, BlockImportOutcome, BlockValidation, NewBlockEvent}, + import::{BlockImportEvent, BlockImportOutcome, BlockValidation}, listener::ConnectionListener, message::{NewBlockMessage, PeerMessage}, metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, @@ -40,9 +40,11 @@ use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_chainspec::EnrForkIdEntry; use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, NetworkPrimitives}; +use reth_eth_wire_types::NewBlockPayload; use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; use reth_network_api::{ + block::NewBlockWithPeer, events::{PeerEvent, SessionInfo}, test_utils::PeersHandle, EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, @@ -110,7 +112,7 @@ handle: NetworkHandle<N>, /// Receiver half of the command channel set up between this type and the [`NetworkHandle`] from_handle_rx: UnboundedReceiverStream<NetworkHandleMessage<N>>, /// Handles block imports according to the `eth` protocol. - block_import: Box<dyn BlockImport<N::NewBlockPayload>>, + block_import: EventSender<NewBlockWithPeer<N::Block>>, /// Sender for high level network events. event_sender: EventSender<NetworkEvent<PeerRequest<N>>>, /// Sender half to send events to the @@ -238,7 +240,7 @@ listener_addr, peers_config, sessions_config, chain_id, - block_import, + block_import: _, network_mode, boot_nodes, executor, @@ -252,6 +254,7 @@ transactions_manager_config: _, nat, handshake, required_block_hashes, + header_transform, } = config;   let peers_manager = PeersManager::new(peers_config); @@ -313,6 +316,7 @@ crate::state::BlockNumReader::new(client), discovery, peers_manager, Arc::clone(&num_active_peers), + header_transform, );   let swarm = Swarm::new(incoming, sessions, state); @@ -347,7 +351,7 @@ Ok(Self { swarm, handle, from_handle_rx: UnboundedReceiverStream::new(from_handle_rx), - block_import, + block_import: EventSender::new(1000), event_sender, to_transactions_manager: None, to_eth_request_handler: None, @@ -384,7 +388,7 @@ /// let (handle, network, transactions, request_handler) = NetworkManager::builder(config) /// .await /// .unwrap() /// .transactions(pool, transactions_manager_config) - /// .request_handler(client) + /// .request_handler(client, None) /// .split_with_handle(); /// } /// ``` @@ -542,6 +546,7 @@ } } }   + #[allow(dead_code)] /// Invoked after a `NewBlock` message from the peer was validated fn on_block_import_result(&mut self, event: BlockImportEvent<N::NewBlockPayload>) { match event { @@ -608,14 +613,16 @@ self.within_pow_or_disconnect(peer_id, |this| { // update peer's state, to track what blocks this peer has seen this.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0.clone()); // start block import process for the hashes - this.block_import.on_new_block(peer_id, NewBlockEvent::Hashes(hashes)); + // this.block_import.on_new_block(peer_id, NewBlockEvent::Hashes(hashes)); }) } PeerMessage::NewBlock(block) => { self.within_pow_or_disconnect(peer_id, move |this| { 
this.swarm.state_mut().on_new_block(peer_id, block.hash); + let block = Arc::unwrap_or_clone(block.block); // start block import process - this.block_import.on_new_block(peer_id, NewBlockEvent::Block(block)); + this.block_import + .notify(NewBlockWithPeer { peer_id, block: block.block().clone() }); }); } PeerMessage::PooledTransactions(msg) => { @@ -646,6 +653,9 @@ /// Handler for received messages from a handle fn on_handle_message(&mut self, msg: NetworkHandleMessage<N>) { match msg { + NetworkHandleMessage::EthWireBlockListener(tx) => { + let _ = tx.send(self.block_import.new_listener()); + } NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); } @@ -1095,11 +1105,6 @@ let start = Instant::now(); let mut poll_durations = NetworkManagerPollDurations::default();   let this = self.get_mut(); - - // poll new block imports (expected to be a noop for POS) - while let Poll::Ready(outcome) = this.block_import.poll(cx) { - this.on_block_import_result(outcome); - }   // These loops drive the entire state of network and does a lot of work. Under heavy load // (many messages/events), data may arrive faster than it can be processed (incoming
diff --git reth/crates/net/network/src/network.rs scroll-reth/crates/net/network/src/network.rs index cfc3d56cb28dceebbd9cde934287d2dcc2a7e0e1..9e01cff13aeea0775b592f0d8601b28a69c0ef40 100644 --- reth/crates/net/network/src/network.rs +++ scroll-reth/crates/net/network/src/network.rs @@ -14,6 +14,7 @@ NewPooledTransactionHashes, SharedTransactions, }; use reth_ethereum_forks::Head; use reth_network_api::{ + block::{EthWireProvider, NewBlockWithPeer}, events::{NetworkPeersEvents, PeerEvent, PeerEventStream}, test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, @@ -88,15 +89,18 @@ Self { inner: Arc::new(inner) } }   /// Returns the [`PeerId`] used in the network. + #[allow(clippy::missing_const_for_fn)] pub fn peer_id(&self) -> &PeerId { &self.inner.local_peer_id }   + #[allow(clippy::missing_const_for_fn)] fn manager(&self) -> &UnboundedSender<NetworkHandleMessage<N>> { &self.inner.to_manager_tx }   /// Returns the mode of the network, either pow, or pos + #[allow(clippy::missing_const_for_fn)] pub fn mode(&self) -> &NetworkMode { &self.inner.network_mode } @@ -182,11 +186,13 @@ self.send_message(NetworkHandleMessage::SetNetworkState(network_conn)); }   /// Whether tx gossip is disabled + #[allow(clippy::missing_const_for_fn)] pub fn tx_gossip_disabled(&self) -> bool { self.inner.tx_gossip_disabled }   /// Returns the secret key used for authenticating sessions. + #[allow(clippy::missing_const_for_fn)] pub fn secret_key(&self) -> &SecretKey { &self.inner.secret_key } @@ -216,6 +222,20 @@ fn discovery_listener(&self) -> UnboundedReceiverStream<DiscoveryEvent> { let (tx, rx) = mpsc::unbounded_channel(); let _ = self.manager().send(NetworkHandleMessage::DiscoveryListener(tx)); UnboundedReceiverStream::new(rx) + } +} + +impl<N: NetworkPrimitives> EthWireProvider<N> for NetworkHandle<N> { + async fn eth_wire_block_listener( + &self, + ) -> Result<EventStream<NewBlockWithPeer<N::Block>>, oneshot::error::RecvError> { + let (tx, rx) = oneshot::channel(); + self.send_message(NetworkHandleMessage::EthWireBlockListener(tx)); + rx.await + } + + fn eth_wire_announce_block(&self, block: N::NewBlockPayload, hash: B256) { + self.announce_block(block, hash) } }   @@ -548,4 +568,6 @@ /// Connect to the given peer. ConnectPeer(PeerId, PeerKind, PeerAddr), /// Message to update the node's advertised block range information. InternalBlockRangeUpdate(BlockRangeUpdate), + /// Retries a eth wire new block event listener. + EthWireBlockListener(oneshot::Sender<EventStream<NewBlockWithPeer<N::Block>>>), }
diff --git reth/crates/net/network/src/session/active.rs scroll-reth/crates/net/network/src/session/active.rs index 32f908998514a25981ddf0dee13ae6bf3b8ac6ea..0044c1f92e1529a26e745c3f96692349b3e58e43 100644 --- reth/crates/net/network/src/session/active.rs +++ scroll-reth/crates/net/network/src/session/active.rs @@ -924,6 +924,16 @@ self.messages.shrink_to_fit(); } }   +impl<N: NetworkPrimitives> Drop for QueuedOutgoingMessages<N> { + fn drop(&mut self) { + // Ensure gauge is decremented for any remaining items to avoid metric leak on teardown. + let remaining = self.messages.len(); + if remaining > 0 { + self.count.decrement(remaining as f64); + } + } +} + #[cfg(test)] mod tests { use super::*;
diff --git reth/crates/net/network/src/state.rs scroll-reth/crates/net/network/src/state.rs index 57d1a73198eeba2c29d9073974f71d943deb7508..0d4fbfa19ccd8bed9c49992f3a71047e47a8875e 100644 --- reth/crates/net/network/src/state.rs +++ scroll-reth/crates/net/network/src/state.rs @@ -7,11 +7,11 @@ fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, peers::{PeerAction, PeersManager}, session::BlockRangeInfo, + transform::header::HeaderTransform, FetchClient, }; use alloy_consensus::BlockHeader; use alloy_primitives::B256; -use rand::seq::SliceRandom; use reth_eth_wire::{ BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlockHashes, NewBlockPayload, UnifiedStatus, @@ -102,8 +102,10 @@ client: BlockNumReader, discovery: Discovery, peers_manager: PeersManager, num_active_peers: Arc<AtomicUsize>, + header_transform: Arc<dyn HeaderTransform<N::BlockHeader>>, ) -> Self { - let state_fetcher = StateFetcher::new(peers_manager.handle(), num_active_peers); + let state_fetcher = + StateFetcher::new(peers_manager.handle(), num_active_peers, header_transform); Self { active_peers: Default::default(), peers_manager, @@ -189,21 +191,13 @@ /// Starts propagating the new block to peers that haven't reported the block yet. /// /// This is supposed to be invoked after the block was validated. /// - /// > It then sends the block to a small fraction of connected peers (usually the square root of - /// > the total number of peers) using the `NewBlock` message. + /// Note: Sends a `NewBlock` message to all of the connected peers. This is okay because this + /// method is only used until we deprecate l2geth clients which don't support scroll-wire. /// /// See also <https://github.com/ethereum/devp2p/blob/master/caps/eth.md> pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage<N::NewBlockPayload>) { - // send a `NewBlock` message to a fraction of the connected peers (square root of the total - // number of peers) - let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; - let number = msg.block.block().header().number(); - let mut count = 0; - - // Shuffle to propagate to a random sample of peers on every block announcement - let mut peers: Vec<_> = self.active_peers.iter_mut().collect(); - peers.shuffle(&mut rand::rng()); + let peers: Vec<_> = self.active_peers.iter_mut().collect();   for (peer_id, peer) in peers { if peer.blocks.contains(&msg.hash) { @@ -212,24 +206,16 @@ continue }   // Queue a `NewBlock` message for the peer - if count < num_propagate { - self.queued_messages - .push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() }); + self.queued_messages + .push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() });   - // update peer block info - if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) { - peer.best_hash = msg.hash; - } - - // mark the block as seen by the peer - peer.blocks.insert(msg.hash); - - count += 1; + // update peer block info + if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) { + peer.best_hash = msg.hash; }   - if count >= num_propagate { - break - } + // mark the block as seen by the peer + peer.blocks.insert(msg.hash); } }   @@ -596,7 +582,7 @@ peers_manager: Default::default(), queued_messages: Default::default(), client: BlockNumReader(Box::new(NoopProvider::default())), discovery: Discovery::noop(), - state_fetcher: StateFetcher::new(handle, Default::default()), + 
state_fetcher: StateFetcher::new(handle, Default::default(), Arc::new(())), } }
diff --git reth/crates/net/network/src/test_utils/testnet.rs scroll-reth/crates/net/network/src/test_utils/testnet.rs index d24668995433610faee391afb4916ecdcbd865d8..deef0791b0aa91e1780a7662a7ee3eaf242be0b2 100644 --- reth/crates/net/network/src/test_utils/testnet.rs +++ scroll-reth/crates/net/network/src/test_utils/testnet.rs @@ -104,6 +104,7 @@ &mut self.peers }   /// Return a slice of all peers. + #[allow(clippy::missing_const_for_fn)] pub fn peers(&self) -> &[Peer<C, Pool>] { &self.peers } @@ -354,6 +355,7 @@ rx.await.unwrap() }   /// Returns the [`PeerHandle`]s of this [`Testnet`]. + #[allow(clippy::missing_const_for_fn)] pub fn peers(&self) -> &[PeerHandle<Pool>] { &self.peers } @@ -467,7 +469,7 @@ pub fn install_request_handler(&mut self) { let (tx, rx) = channel(ETH_REQUEST_CHANNEL_CAPACITY); self.network.set_eth_request_handler(tx); let peers = self.network.peers_handle(); - let request_handler = EthRequestHandler::new(self.client.clone(), peers, rx); + let request_handler = EthRequestHandler::new(self.client.clone(), peers, rx, None); self.request_handler = Some(request_handler); }
diff --git reth/crates/net/network/src/transactions/fetcher.rs scroll-reth/crates/net/network/src/transactions/fetcher.rs index 1cb725e4efbe92fb56a452d695808b5df7c00cef..4859352f8c4a4fc0b6cd3b35d3e0e361d1f443c8 100644 --- reth/crates/net/network/src/transactions/fetcher.rs +++ scroll-reth/crates/net/network/src/transactions/fetcher.rs @@ -284,9 +284,7 @@ let mut surplus_hashes = RequestTxHashes::default();   // folds size based on expected response size and adds selected hashes to the request // list and the other hashes to the surplus list - loop { - let Some((hash, metadata)) = hashes_from_announcement_iter.next() else { break }; - + for (hash, metadata) in hashes_from_announcement_iter.by_ref() { let Some((_ty, size)) = metadata else { unreachable!("this method is called upon reception of an eth68 announcement") };
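The `by_ref()` form keeps the surrounding iterator usable after the loop, which is what the original `loop` / `let Some(..) = iter.next()` pattern achieved. A minimal standalone illustration of the idiom:

```rust
fn main() {
    let mut iter = [1u32, 2, 3, 4].into_iter();
    // `by_ref()` lets the for loop borrow the iterator instead of consuming it, so
    // iteration can stop early and the remainder stays available afterwards.
    for n in iter.by_ref() {
        if n == 2 {
            break;
        }
    }
    assert_eq!(iter.next(), Some(3));
}
```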
diff --git reth/crates/net/network/src/transactions/mod.rs scroll-reth/crates/net/network/src/transactions/mod.rs index 9eb07e7b1a0f82ee570cb666d8802ff6a3a160c3..f4ef42523d5890a2d0a71f13b65d6553c577355f 100644 --- reth/crates/net/network/src/transactions/mod.rs +++ scroll-reth/crates/net/network/src/transactions/mod.rs @@ -28,8 +28,7 @@ use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE}; use crate::{ budget::{ DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, cache::LruCache, duration_metered_exec, metered_poll_nested_stream_with_budget, @@ -77,7 +76,7 @@ task::{Context, Poll}, time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; -use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; +use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace};   /// The future for importing transactions into the pool. @@ -339,7 +338,7 @@ /// requirements, these include: /// - no nonce gaps /// - all dynamic fee requirements are (currently) met /// - account has enough balance to cover the transaction's gas - pending_transactions: ReceiverStream<TxHash>, + pending_transactions: mpsc::Receiver<TxHash>, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). transaction_events: UnboundedMeteredReceiver<NetworkTransactionEvent<N>>, /// How the `TransactionsManager` is configured. @@ -422,7 +421,7 @@ bad_imports: LruCache::new(DEFAULT_MAX_COUNT_BAD_IMPORTS), peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), - pending_transactions: ReceiverStream::new(pending), + pending_transactions: pending, transaction_events: UnboundedMeteredReceiver::new( from_network, NETWORK_POOL_TRANSACTIONS_SCOPE, @@ -1529,14 +1528,16 @@ // // We don't expect this buffer to be large, since only pending transactions are // emitted here. let mut new_txs = Vec::new(); - let maybe_more_pending_txns = metered_poll_nested_stream_with_budget!( - poll_durations.acc_imported_txns, - "net::tx", - "Pending transactions stream", - DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - this.pending_transactions.poll_next_unpin(cx), - |hash| new_txs.push(hash) - ); + let maybe_more_pending_txns = match this.pending_transactions.poll_recv_many( + cx, + &mut new_txs, + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ) { + Poll::Ready(count) => { + count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE + } + Poll::Pending => false, + }; if !new_txs.is_empty() { this.on_new_pending_transactions(new_txs); }
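The switch from a budgeted stream poll to `Receiver::poll_recv_many` batches pending transaction hashes in a single poll. A small sketch of the pattern, assuming a tokio version that provides `poll_recv_many` (1.37+):

```rust
use std::task::{Context, Poll};
use tokio::sync::mpsc;

/// Drains up to `limit` items from the channel in one poll and reports whether the
/// batch came back full (i.e. there may be more work), mirroring the change above.
fn poll_batch<T>(
    rx: &mut mpsc::Receiver<T>,
    cx: &mut Context<'_>,
    buf: &mut Vec<T>,
    limit: usize,
) -> bool {
    match rx.poll_recv_many(cx, buf, limit) {
        Poll::Ready(count) => count == limit,
        Poll::Pending => false,
    }
}
```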
diff --git reth/crates/net/network/src/transform/header.rs scroll-reth/crates/net/network/src/transform/header.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7a7880f1b4fcc355b190aa7fd75f4f9f7a3a8ea --- /dev/null +++ scroll-reth/crates/net/network/src/transform/header.rs @@ -0,0 +1,32 @@ +//! Abstraction over a transformation applied to headers. + +use reth_primitives_traits::BlockHeader; + +/// An instance of the trait applies a mapping to the input headers. +#[async_trait::async_trait] +pub trait HeaderTransform<H: BlockHeader>: std::fmt::Debug + Send + Sync { + /// Applies a mapping to the input headers. + async fn map(&self, headers: Vec<H>) -> Vec<H>; +} + +#[async_trait::async_trait] +impl<H: BlockHeader> HeaderTransform<H> for () { + async fn map(&self, headers: Vec<H>) -> Vec<H> { + headers + } +} + +/// An instance of the trait applies a mapping to headers that are being sent to a peer in response +/// to a request. +#[async_trait::async_trait] +pub trait HeaderResponseTransform<H: BlockHeader>: std::fmt::Debug + Send + Sync { + /// Applies a mapping to the response headers. + async fn map(&self, header: H) -> H; +} + +#[async_trait::async_trait] +impl<H: BlockHeader> HeaderResponseTransform<H> for () { + async fn map(&self, header: H) -> H { + header + } +}
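The new `transform::header` module is a hook point that the fork threads through the network builder (see `start_network` further down). A hedged sketch of a custom implementation; `LoggingHeaderTransform` is hypothetical, only the trait itself comes from the diff above:

```rust
use reth_network::transform::header::HeaderResponseTransform;
use reth_primitives_traits::BlockHeader;

/// Hypothetical transform that inspects headers before they are returned to a peer.
#[derive(Debug, Default)]
struct LoggingHeaderTransform;

#[async_trait::async_trait]
impl<H: BlockHeader> HeaderResponseTransform<H> for LoggingHeaderTransform {
    async fn map(&self, header: H) -> H {
        // A real implementation could rewrite fields here; this one only logs.
        tracing::trace!(number = header.number(), "serving header to peer");
        header
    }
}
```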
diff --git reth/crates/net/network/src/transform/mod.rs scroll-reth/crates/net/network/src/transform/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2081c2bd939cc50a1accdacd545427963b9fb1b7 --- /dev/null +++ scroll-reth/crates/net/network/src/transform/mod.rs @@ -0,0 +1,4 @@ +//! Provides an abstraction over transformation hooks that can be applied to data responses from +//! peers. + +pub mod header;
diff --git reth/crates/net/network/tests/it/connect.rs scroll-reth/crates/net/network/tests/it/connect.rs index 1a3371a9073f2d03d76ea3b1fd196fcacd167a91..2514e239ea4bc5a8f976365387a7113c9e41fc21 100644 --- reth/crates/net/network/tests/it/connect.rs +++ scroll-reth/crates/net/network/tests/it/connect.rs @@ -234,7 +234,7 @@ let (handle, network, _, requests) = NetworkManager::new(config) .await .unwrap() .into_builder() - .request_handler(client) + .request_handler(client, None) .split_with_handle();   let mut events = handle.event_listener(); @@ -271,7 +271,7 @@ let (handle, network, transactions, requests) = NetworkManager::new(config) .await .unwrap() .into_builder() - .request_handler(client) + .request_handler(client, None) .transactions(testing_pool(), transactions_manager_config) .split_with_handle();
diff --git reth/crates/net/p2p/src/full_block.rs scroll-reth/crates/net/p2p/src/full_block.rs index 06128c6b54235609d37ac8440eb90c400a199773..ba855dcfd3b0f1a99909801639524bb14c7667a8 100644 --- reth/crates/net/p2p/src/full_block.rs +++ scroll-reth/crates/net/p2p/src/full_block.rs @@ -10,7 +10,7 @@ }; use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; use core::marker::PhantomData; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator}; use reth_eth_wire_types::{EthNetworkPrimitives, HeadersDirection, NetworkPrimitives}; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives_traits::{SealedBlock, SealedHeader};
diff --git reth/crates/net/peers/src/bootnodes/mod.rs scroll-reth/crates/net/peers/src/bootnodes/mod.rs index e510e0b1cda2cc8dedc793703107497342c21891..17fa1741d8dbc646ff09c239df0bf73d0643fc4e 100644 --- reth/crates/net/peers/src/bootnodes/mod.rs +++ scroll-reth/crates/net/peers/src/bootnodes/mod.rs @@ -9,6 +9,9 @@ mod optimism; pub use optimism::*;   +mod scroll; +pub use scroll::*; + /// Returns parsed mainnet nodes pub fn mainnet_nodes() -> Vec<NodeRecord> { parse_nodes(&MAINNET_BOOTNODES[..]) @@ -47,6 +50,16 @@ /// Returns parsed op-stack base testnet nodes pub fn base_testnet_nodes() -> Vec<NodeRecord> { parse_nodes(OP_TESTNET_BOOTNODES) +} + +/// Returns parsed scroll mainnet nodes +pub fn scroll_nodes() -> Vec<NodeRecord> { + parse_nodes(SCROLL_BOOTNODES) +} + +/// Returns parsed scroll sepolia nodes +pub fn scroll_sepolia_nodes() -> Vec<NodeRecord> { + parse_nodes(SCROLL_SEPOLIA_BOOTNODES) }   /// Parses all the nodes
diff --git reth/crates/net/peers/src/bootnodes/scroll.rs scroll-reth/crates/net/peers/src/bootnodes/scroll.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d85fcc8d077b6ea67ea3a1c5426620fb2185500 --- /dev/null +++ scroll-reth/crates/net/peers/src/bootnodes/scroll.rs @@ -0,0 +1,17 @@ +//! Scroll bootnodes come from <https://github.com/scroll-tech/go-ethereum/blob/develop/params/bootnodes.go> + +/// Scroll mainnet boot nodes. +pub static SCROLL_BOOTNODES: &[&str] = &[ + "enode://c6ac91f43df3d63916ac1ae411cdd5ba249d55d48a7bec7f8cd5bb351a31aba437e5a69e8a1de74d73fdfeba8af1cfe9caf9846ecd3abf60d1ffdf4925b55b23@54.186.123.248:30303", + "enode://fdcc807b5d1353f3a1e98b90208ce6ef1b7d446136e51eaa8ad657b55518a2f8b37655e42375d61622e6ea18f3faf9d070c9bbdf012cf5484bcbad33b7a15fb1@44.227.91.206:30303", + "enode://6beb5a3efbb39be73d17630b6da48e94c0ce7ec665172111463cb470197b20c12faa1fa6f835b81c28571277d1017e65c4e426cc92a46141cf69118ecf28ac03@44.237.194.52:30303", + "enode://7cf893d444eb8e129dca0f6485b3df579911606e7c728be4fa55fcc5f155a37c3ce07d217ccec5447798bde465ac2bdba2cb8763d107e9f3257e787579e9f27e@52.35.203.107:30303", + "enode://c7b2d94e95da343db6e667a01cef90376a592f2d277fbcbf6e9c9186734ed8003d01389571bd10cdbab7a6e5adfa6f0c7b55644d0db24e0b9deb4ec80f842075@54.70.236.187:30303", +]; + +/// Scroll sepolia boot nodes. +pub static SCROLL_SEPOLIA_BOOTNODES: &[&str] = &[ + "enode://ceb1636bac5cbb262e5ad5b2cd22014bdb35ffe7f58b3506970d337a63099481814a338dbcd15f2d28757151e3ecd40ba38b41350b793cd0d910ff0436654f8c@35.85.84.250:30303", + "enode://29cee709c400533ae038a875b9ca975c8abef9eade956dcf3585e940acd5c0ae916968f514bd37d1278775aad1b7db30f7032a70202a87fd7365bd8de3c9f5fc@44.242.39.33:30303", + "enode://dd1ac5433c5c2b04ca3166f4cb726f8ff6d2da83dbc16d9b68b1ea83b7079b371eb16ef41c00441b6e85e32e33087f3b7753ea9e8b1e3f26d3e4df9208625e7f@54.148.111.168:30303", +];
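Taken together with the helpers added to `bootnodes/mod.rs` above, the new constants resolve to `NodeRecord`s like any other built-in bootnode set. A small usage sketch, assuming the crate is imported as `reth_network_peers` (the same crate that exposes the existing `mainnet_nodes` helper):

```rust
use reth_network_peers::{scroll_nodes, scroll_sepolia_nodes, NodeRecord};

fn main() {
    let mainnet: Vec<NodeRecord> = scroll_nodes();
    let sepolia: Vec<NodeRecord> = scroll_sepolia_nodes();

    // Each enode string above parses into a peer id plus socket address.
    for node in mainnet.iter().chain(sepolia.iter()) {
        println!("{} @ {}", node.id, node.address);
    }
}
```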
diff --git reth/crates/node/builder/src/builder/mod.rs scroll-reth/crates/node/builder/src/builder/mod.rs index fb22a82795e61fba1207c615766d093c25ed10a4..2f543d8b71b6b3b80df98f292d7e669edb0f439f 100644 --- reth/crates/node/builder/src/builder/mod.rs +++ scroll-reth/crates/node/builder/src/builder/mod.rs @@ -6,7 +6,7 @@ use crate::{ common::WithConfigs, components::NodeComponentsBuilder, node::FullNode, - rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext, RpcHandleProvider}, BlockReaderFor, DebugNode, DebugNodeLauncher, EngineNodeLauncher, LaunchNode, Node, }; use alloy_eips::eip4844::env_settings::EnvKzgSettings; @@ -17,6 +17,7 @@ use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_exex::ExExContext; use reth_network::{ transactions::{TransactionPropagationPolicy, TransactionsManagerConfig}, + transform::header::HeaderResponseTransform, NetworkBuilder, NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, NetworkPrimitives, }; @@ -49,6 +50,14 @@ /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. pub type RethFullAdapter<DB, Types> = FullNodeTypesAdapter<Types, DB, BlockchainProvider<NodeTypesWithDBAdapter<Types, DB>>>; + +/// A full node adapter for a reth node with the builtin provider type +type FullNodeAdapter<N, DB> = NodeAdapter< + RethFullAdapter<DB, N>, + <<N as Node<RethFullAdapter<DB, N>>>::ComponentsBuilder as NodeComponentsBuilder< + RethFullAdapter<DB, N>, + >>::Components, +>;   #[expect(clippy::doc_markdown)] #[cfg_attr(doc, aquamarine::aquamarine)] @@ -391,11 +400,10 @@ >>::Node, > where N: Node<RethFullAdapter<DB, N>, ChainSpec = ChainSpec> + NodeTypesForProvider, - N::AddOns: RethRpcAddOns< - NodeAdapter< - RethFullAdapter<DB, N>, - <N::ComponentsBuilder as NodeComponentsBuilder<RethFullAdapter<DB, N>>>::Components, - >, + N::AddOns: RethRpcAddOns<FullNodeAdapter<N, DB>>, + <N::AddOns as NodeAddOns<FullNodeAdapter<N, DB>>>::Handle: RpcHandleProvider< + FullNodeAdapter<N, DB>, + <N::AddOns as RethRpcAddOns<FullNodeAdapter<N, DB>>>::EthApi, >, N::Primitives: FullNodePrimitives, EngineNodeLauncher: LaunchNode< @@ -448,6 +456,12 @@ where T: FullNodeTypes, CB: NodeComponentsBuilder<T>, AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>, + <AO as NodeAddOns< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + >>::Handle: RpcHandleProvider< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + <AO as RethRpcAddOns<NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>>>::EthApi, + >, { /// Returns a reference to the node builder's config. 
pub const fn config(&self) -> &NodeConfig<<T::Types as NodeTypes>::ChainSpec> { @@ -572,10 +586,10 @@ /// .node(EthereumNode::default()) /// .extend_rpc_modules(|ctx| { /// // Access node components, so they can used by the CustomApi /// let pool = ctx.pool().clone(); - /// + /// /// // Add custom RPC namespace /// ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?; - /// + /// /// Ok(()) /// }) /// .build()?; @@ -776,6 +790,7 @@ pub fn start_network<N, Pool>( &self, builder: NetworkBuilder<(), (), N>, pool: Pool, + request_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, ) -> NetworkHandle<N> where N: NetworkPrimitives, @@ -793,6 +808,7 @@ builder, pool, self.config().network.transactions_manager_config(), self.config().network.tx_propagation_policy, + request_transform, ) }   @@ -808,6 +824,7 @@ builder: NetworkBuilder<(), (), N>, pool: Pool, tx_config: TransactionsManagerConfig, propagation_policy: Policy, + request_transform: Option<Arc<dyn HeaderResponseTransform<N::BlockHeader>>>, ) -> NetworkHandle<N> where N: NetworkPrimitives, @@ -823,7 +840,7 @@ Policy: TransactionPropagationPolicy + Debug, { let (handle, network, txpool, eth) = builder .transactions_with_policy(pool, tx_config, propagation_policy) - .request_handler(self.provider().clone()) + .request_handler(self.provider().clone(), request_transform) .split_with_handle();   self.executor.spawn_critical("p2p txpool", Box::pin(txpool));
diff --git reth/crates/node/builder/src/builder/states.rs scroll-reth/crates/node/builder/src/builder/states.rs index f60b56d57e76dc1b87a0b5822278215a5438e65b..97d2686e3c8ffdb220b0f5c5fe4f62d07aef3b94 100644 --- reth/crates/node/builder/src/builder/states.rs +++ scroll-reth/crates/node/builder/src/builder/states.rs @@ -9,7 +9,7 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, launch::LaunchNode, - rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext, RpcHandleProvider}, AddOns, ComponentsFor, FullNode, };   @@ -249,6 +249,12 @@ where T: FullNodeTypes, CB: NodeComponentsBuilder<T>, AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>, + <AO as NodeAddOns< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + >>::Handle: RpcHandleProvider< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + <AO as RethRpcAddOns<NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>>>::EthApi, + >, { /// Launches the node with the given launcher. pub fn launch_with<L>(self, launcher: L) -> L::Future @@ -298,7 +304,7 @@ use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::noop::NoopEvmConfig; use reth_evm_ethereum::MockEvmConfig; use reth_network::EthNetworkPrimitives; - use reth_network_api::noop::NoopNetwork; + use reth_network_api::{self, noop::NoopNetwork}; use reth_node_api::FullNodeTypesAdapter; use reth_node_ethereum::EthereumNode; use reth_payload_builder::PayloadBuilderHandle;
diff --git reth/crates/node/builder/src/components/pool.rs scroll-reth/crates/node/builder/src/components/pool.rs index ddc137031b7fac8056a5b2316012dc8055b438c4..ae4446ce51b60718ecae56eb7063f86637186d5d 100644 --- reth/crates/node/builder/src/components/pool.rs +++ scroll-reth/crates/node/builder/src/components/pool.rs @@ -1,15 +1,15 @@ //! Pool component for the node builder.   +use crate::{BuilderContext, FullNodeTypes}; use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; +use reth_chainspec::ChainSpecProvider; use reth_node_api::TxTy; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, }; use std::{collections::HashSet, future::Future}; - -use crate::{BuilderContext, FullNodeTypes};   /// A type that knows how to build the transaction pool. pub trait PoolBuilder<Node: FullNodeTypes>: Send { @@ -166,14 +166,12 @@ /// Create blob store with default configuration. pub fn create_blob_store<Node: FullNodeTypes>( ctx: &BuilderContext<Node>, ) -> eyre::Result<DiskFileBlobStore> { - let data_dir = ctx.config().datadir(); - Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open( - data_dir.blobstore(), - Default::default(), - )?) + let cache_size = Some(ctx.config().txpool.max_cached_entries); + create_blob_store_with_cache(ctx, cache_size) }   -/// Create blob store with custom cache size configuration. +/// Create blob store with custom cache size configuration for how many blobs should be cached in +/// memory. pub fn create_blob_store_with_cache<Node: FullNodeTypes>( ctx: &BuilderContext<Node>, cache_size: Option<u32>, @@ -236,11 +234,13 @@ Pool::Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>, { let chain_events = ctx.provider().canonical_state_stream(); let client = ctx.provider().clone(); + let chain_spec = client.chain_spec();   ctx.task_executor().spawn_critical( "txpool maintenance task", reth_transaction_pool::maintain::maintain_transaction_pool_future( client, + chain_spec, pool, chain_events, ctx.task_executor().clone(),
diff --git reth/crates/node/builder/src/engine_api_ext.rs scroll-reth/crates/node/builder/src/engine_api_ext.rs index 936a2e190512948237d07f7ff6a03a237d6cda15..33d1d3e63ad472c61a97415c2ff646d43389f49e 100644 --- reth/crates/node/builder/src/engine_api_ext.rs +++ scroll-reth/crates/node/builder/src/engine_api_ext.rs @@ -5,7 +5,6 @@ use crate::rpc::EngineApiBuilder; use eyre::Result; use reth_node_api::{AddOnsContext, FullNodeComponents}; -use reth_rpc_api::IntoEngineApiRpcModule;   /// Provides access to an `EngineApi` instance with a callback #[derive(Debug)] @@ -27,7 +26,7 @@ impl<N, B, F> EngineApiBuilder<N> for EngineApiExt<B, F> where B: EngineApiBuilder<N>, N: FullNodeComponents, - B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static, + B::EngineApi: Clone, F: FnOnce(B::EngineApi) + Send + Sync + 'static, { type EngineApi = B::EngineApi;
diff --git reth/crates/node/builder/src/handle.rs scroll-reth/crates/node/builder/src/handle.rs index 2997a8687afdb3114c52255e0ac4407fea725b0e..112c85408b1abf7a67c82164e3c747bb7f548843 100644 --- reth/crates/node/builder/src/handle.rs +++ scroll-reth/crates/node/builder/src/handle.rs @@ -3,11 +3,18 @@ use reth_node_api::FullNodeComponents; use reth_node_core::exit::NodeExitFuture;   -use crate::{node::FullNode, rpc::RethRpcAddOns}; +use crate::{ + node::FullNode, + rpc::{RethRpcAddOns, RpcHandleProvider}, +};   /// A Handle to the launched node. #[must_use = "Needs to await the node exit future"] -pub struct NodeHandle<Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>> { +pub struct NodeHandle<Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>> +where + <AddOns as reth_node_api::NodeAddOns<Node>>::Handle: + RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, +{ /// All node components. pub node: FullNode<Node, AddOns>, /// The exit future of the node. @@ -18,6 +25,8 @@ impl<Node, AddOns> NodeHandle<Node, AddOns> where Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>, + <AddOns as reth_node_api::NodeAddOns<Node>>::Handle: + RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { /// Waits for the node to exit, if it was configured to exit. pub async fn wait_for_node_exit(self) -> eyre::Result<()> { @@ -29,6 +38,8 @@ impl<Node, AddOns> fmt::Debug for NodeHandle<Node, AddOns> where Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>, + <AddOns as reth_node_api::NodeAddOns<Node>>::Handle: + RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NodeHandle")
diff --git reth/crates/node/builder/src/launch/common.rs scroll-reth/crates/node/builder/src/launch/common.rs index 3a35c4183f105a73de3e10829c38ec9623b10a90..2d1fb6924d83de3dab40ca50626b85ee3e089377 100644 --- reth/crates/node/builder/src/launch/common.rs +++ scroll-reth/crates/node/builder/src/launch/common.rs @@ -41,12 +41,10 @@ use eyre::Context; use rayon::ThreadPoolBuilder; use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; -use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; -use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; +use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; use reth_network_p2p::headers::client::HeadersClient; @@ -67,25 +65,19 @@ version::VersionInfo, }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, ProviderResult, + StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{ - sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget, - StageId, -}; +use reth_stages::{stages::EraImportSource, MetricEvent}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; use reth_transaction_pool::TransactionPool; use std::{sync::Arc, thread::available_parallelism}; -use tokio::sync::{ - mpsc::{unbounded_channel, UnboundedSender}, - oneshot, watch, -}; +use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};   use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; @@ -466,70 +458,13 @@ where N: ProviderNodeTypes<DB = DB, ChainSpec = ChainSpec>, Evm: ConfigureEvm<Primitives = N::Primitives> + 'static, { - let factory = ProviderFactory::new( + Ok(ProviderFactory::new( self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, ) .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); - - let has_receipt_pruning = - self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); - - // Check for consistency between database and static files. If it fails, it unwinds to - // the first block that's consistent between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? - { - // Highly unlikely to happen, and given its destructive nature, it's better to panic - // instead. 
- assert_ne!( - unwind_target, - PipelineTarget::Unwind(0), - "A static file <> database inconsistency was found that would trigger an unwind to block 0" - ); - - info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); - - let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); - - // Builds an unwind-only pipeline - let pipeline = PipelineBuilder::default() - .add_stages(DefaultStages::new( - factory.clone(), - tip_rx, - Arc::new(NoopConsensus::default()), - NoopHeaderDownloader::default(), - NoopBodiesDownloader::default(), - NoopEvmConfig::<Evm>::default(), - self.toml_config().stages.clone(), - self.prune_modes(), - None, - )) - .build( - factory.clone(), - StaticFileProducer::new(factory.clone(), self.prune_modes()), - ); - - // Unwinds to block - let (tx, rx) = oneshot::channel(); - - // Pipeline should be run as blocking and panic if it fails. - self.task_executor().spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let (_, result) = pipeline.run_as_fut(Some(unwind_target)).await; - let _ = tx.send(result); - }), - ); - rx.await?.inspect_err(|err| { - error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") - })?; - } - - Ok(factory) + .with_static_files_metrics()) }   /// Creates a new [`ProviderFactory`] and attaches it to the launch context. @@ -582,7 +517,7 @@ pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { // ensure recorder runs upkeep periodically install_prometheus_recorder().spawn_upkeep();   - let listen_addr = self.node_config().metrics; + let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( @@ -852,21 +787,6 @@ pub const fn blockchain_db(&self) -> &T::Provider { &self.node_adapter().provider }   - /// Returns the initial backfill to sync to at launch. - /// - /// This returns the configured `debug.tip` if set, otherwise it will check if backfill was - /// previously interrupted and returns the block hash of the last checkpoint, see also - /// [`Self::check_pipeline_consistency`] - pub fn initial_backfill_target(&self) -> ProviderResult<Option<B256>> { - let mut initial_target = self.node_config().debug.tip; - - if initial_target.is_none() { - initial_target = self.check_pipeline_consistency()?; - } - - Ok(initial_target) - } - /// Returns true if the node should terminate after the initial backfill run. /// /// This is the case if any of these configs are set: @@ -880,7 +800,7 @@ /// Ensures that the database matches chain-specific requirements. /// /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) - fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + pub fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { if self.chain_spec().is_optimism() && !self.is_dev() && self.chain_id() == Chain::optimism_mainnet() @@ -896,54 +816,6 @@ } }   Ok(()) - } - - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). - /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. 
- pub fn check_pipeline_consistency(&self) -> ProviderResult<Option<B256>> { - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage_id in StageId::ALL.iter().skip(1) { - let stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*stage_id)? - .unwrap_or_default() - .block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return self.blockchain_db().block_hash(first_stage_checkpoint); - } - } - - self.ensure_chain_specific_db_checks()?; - - Ok(None) }   /// Expire the pre-merge transactions if the node is configured to do so and the chain has a
diff --git reth/crates/node/builder/src/launch/debug.rs scroll-reth/crates/node/builder/src/launch/debug.rs index f5e9745cddc8ec6f82779bfa00be3c5a54aedbcd..a79a11b9dab4cdd012d8884ca295fede4d4ceeb2 100644 --- reth/crates/node/builder/src/launch/debug.rs +++ scroll-reth/crates/node/builder/src/launch/debug.rs @@ -1,5 +1,8 @@ use super::LaunchNode; -use crate::{rpc::RethRpcAddOns, EngineNodeLauncher, Node, NodeHandle}; +use crate::{ + rpc::{RethRpcAddOns, RpcHandleProvider}, + EngineNodeLauncher, Node, NodeHandle, +}; use alloy_consensus::transaction::Either; use alloy_provider::network::AnyNetwork; use jsonrpsee::core::{DeserializeOwned, Serialize}; @@ -130,6 +133,8 @@ where N: FullNodeComponents<Types: DebugNode<N>>, AddOns: RethRpcAddOns<N>, L: LaunchNode<Target, Node = NodeHandle<N, AddOns>>, + <AddOns as reth_node_api::NodeAddOns<N>>::Handle: + RpcHandleProvider<N, <AddOns as RethRpcAddOns<N>>::EthApi>, { pub fn with_payload_attributes_builder( self, @@ -155,7 +160,11 @@ map_attributes: Some(Box::new(f)), } }   - async fn launch_node(self) -> eyre::Result<NodeHandle<N, AddOns>> { + async fn launch_node(self) -> eyre::Result<NodeHandle<N, AddOns>> + where + <AddOns as reth_node_api::NodeAddOns<N>>::Handle: + RpcHandleProvider<N, <AddOns as RethRpcAddOns<N>>::EthApi>, + { let Self { inner, target, local_payload_attributes_builder, map_attributes } = self;   let handle = inner.launch_node(target).await?; @@ -175,7 +184,7 @@ }) .await?;   let rpc_consensus_client = DebugConsensusClient::new( - handle.node.add_ons_handle.beacon_engine_handle.clone(), + handle.node.rpc_handle().beacon_engine_handle.clone(), Arc::new(block_provider), );   @@ -207,7 +216,7 @@ chain.id(), N::Types::rpc_to_primitive_block, ); let rpc_consensus_client = DebugConsensusClient::new( - handle.node.add_ons_handle.beacon_engine_handle.clone(), + handle.node.rpc_handle().beacon_engine_handle.clone(), Arc::new(block_provider), ); handle.node.task_executor.spawn_critical("etherscan consensus client", async move { @@ -220,7 +229,7 @@ info!(target: "reth::cli", "Using local payload attributes builder for dev mode");   let blockchain_db = handle.node.provider.clone(); let chain_spec = config.chain.clone(); - let beacon_engine_handle = handle.node.add_ons_handle.beacon_engine_handle.clone(); + let beacon_engine_handle = handle.node.rpc_handle().beacon_engine_handle.clone(); let pool = handle.node.pool.clone(); let payload_builder_handle = handle.node.payload_builder_handle.clone();   @@ -259,6 +268,8 @@ where Target: Send + 'static, N: FullNodeComponents<Types: DebugNode<N>>, AddOns: RethRpcAddOns<N> + 'static, + <AddOns as reth_node_api::NodeAddOns<N>>::Handle: + RpcHandleProvider<N, <AddOns as RethRpcAddOns<N>>::EthApi>, L: LaunchNode<Target, Node = NodeHandle<N, AddOns>> + 'static, { type Output = eyre::Result<NodeHandle<N, AddOns>>; @@ -274,6 +285,8 @@ where Target: Send + 'static, N: FullNodeComponents<Types: DebugNode<N>>, AddOns: RethRpcAddOns<N> + 'static, + <AddOns as reth_node_api::NodeAddOns<N>>::Handle: + RpcHandleProvider<N, <AddOns as RethRpcAddOns<N>>::EthApi>, L: LaunchNode<Target, Node = NodeHandle<N, AddOns>> + 'static, { type Node = NodeHandle<N, AddOns>;
diff --git reth/crates/node/builder/src/launch/engine.rs scroll-reth/crates/node/builder/src/launch/engine.rs index 5f6c54afc96d4c302114aa00bfd47cd59ff40fb0..d7b139c4d503ff5b71991383930431c6fb3b846c 100644 --- reth/crates/node/builder/src/launch/engine.rs +++ scroll-reth/crates/node/builder/src/launch/engine.rs @@ -3,7 +3,7 @@ use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandleProvider}, setup::build_networked_pipeline, AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -75,6 +75,15 @@ >, CB: NodeComponentsBuilder<T>, AO: RethRpcAddOns<NodeAdapter<T, CB::Components>> + EngineValidatorAddOn<NodeAdapter<T, CB::Components>>, + <AO as reth_node_api::NodeAddOns< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + >>::Handle: + RpcHandleProvider< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + <AO as RethRpcAddOns< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + >>::EthApi, + >, { let Self { ctx, engine_tree_config } = self; let NodeBuilderWithComponents { @@ -117,9 +126,6 @@ Ok(BlockchainProvider::new(provider_factory)?) })? .with_components(components_builder, on_component_initialized).await?;   - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?;   @@ -141,7 +147,7 @@ info!(target: "reth::cli", "StaticFileProducer initialized");   let consensus = Arc::new(ctx.components().consensus().clone());   - let pipeline = build_networked_pipeline( + let mut pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), consensus.clone(), @@ -157,7 +163,18 @@ ctx.era_import_source(), )?;   // The new engine writes directly to static files. This ensures that they're up to the tip. - pipeline.move_to_static_files()?; + pipeline.ensure_static_files_consistency().await?; + + // Try to expire pre-merge transaction history if configured + ctx.expire_pre_merge_transactions()?; + + let initial_target = if let Some(tip) = ctx.node_config().debug.tip { + Some(tip) + } else { + pipeline.initial_backfill_target()? 
+ }; + + ctx.ensure_chain_specific_db_checks()?;   let pipeline_events = pipeline.events();   @@ -228,6 +245,7 @@ );   info!(target: "reth::cli", "Consensus engine initialized");   + #[allow(clippy::needless_continue)] let events = stream_select!( event_sender.new_listener().map(Into::into), pipeline_events.map(Into::into), @@ -245,11 +263,9 @@ events, )), );   - let RpcHandle { rpc_server_handles, rpc_registry, engine_events, beacon_engine_handle } = - add_ons.launch_add_ons(add_ons_ctx).await?; + let add_ons_handle = add_ons.launch_add_ons(add_ons_ctx).await?;   // Run consensus engine to completion - let initial_target = ctx.initial_backfill_target()?; let mut built_payloads = ctx .components() .payload_builder_handle() @@ -339,12 +355,7 @@ payload_builder_handle: ctx.components().payload_builder_handle().clone(), task_executor: ctx.task_executor().clone(), config: ctx.node_config().clone(), data_dir: ctx.data_dir().clone(), - add_ons_handle: RpcHandle { - rpc_server_handles, - rpc_registry, - engine_events, - beacon_engine_handle, - }, + add_ons_handle, }; // Notify on node started on_node_started.on_event(FullNode::clone(&full_node))?; @@ -375,6 +386,12 @@ CB: NodeComponentsBuilder<T> + 'static, AO: RethRpcAddOns<NodeAdapter<T, CB::Components>> + EngineValidatorAddOn<NodeAdapter<T, CB::Components>> + 'static, + <AO as reth_node_api::NodeAddOns< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + >>::Handle: RpcHandleProvider< + NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>, + <AO as RethRpcAddOns<NodeAdapter<T, <CB as NodeComponentsBuilder<T>>::Components>>>::EthApi, + >, { type Node = NodeHandle<NodeAdapter<T, CB::Components>, AO>; type Future = Pin<Box<dyn Future<Output = eyre::Result<Self::Node>> + Send>>;
diff --git reth/crates/node/builder/src/node.rs scroll-reth/crates/node/builder/src/node.rs index ca44ad9523da1d5ba70b2d3dcd7a6b55a88f7475..2864b9f9245d9bafeeeebd1d370a61f99c4bf714 100644 --- reth/crates/node/builder/src/node.rs +++ scroll-reth/crates/node/builder/src/node.rs @@ -2,6 +2,7 @@ use reth_db::DatabaseEnv; // re-export the node api types pub use reth_node_api::{FullNodeTypes, NodeTypes};   +use super::rpc::RpcHandleProvider; use crate::{ components::NodeComponentsBuilder, rpc::RethRpcAddOns, NodeAdapter, NodeAddOns, NodeHandle, RethFullAdapter, @@ -158,15 +159,17 @@ where Payload: PayloadTypes, Node: FullNodeComponents<Types: NodeTypes<Payload = Payload>>, AddOns: RethRpcAddOns<Node>, + <AddOns as reth_node_api::NodeAddOns<Node>>::Handle: + RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { /// Returns the [`RpcServerHandle`] to the started rpc server. - pub const fn rpc_server_handle(&self) -> &RpcServerHandle { - &self.add_ons_handle.rpc_server_handles.rpc + pub fn rpc_server_handle(&self) -> &RpcServerHandle { + &self.add_ons_handle.rpc_handle().rpc_server_handles.rpc }   /// Returns the [`AuthServerHandle`] to the started authenticated engine API server. - pub const fn auth_server_handle(&self) -> &AuthServerHandle { - &self.add_ons_handle.rpc_server_handles.auth + pub fn auth_server_handle(&self) -> &AuthServerHandle { + &self.add_ons_handle.rpc_handle().rpc_server_handles.auth } }   @@ -175,11 +178,13 @@ where Engine: EngineTypes, Node: FullNodeComponents<Types: NodeTypes<Payload = Engine>>, AddOns: RethRpcAddOns<Node>, + <AddOns as reth_node_api::NodeAddOns<Node>>::Handle: + RpcHandleProvider<Node, <AddOns as RethRpcAddOns<Node>>::EthApi>, { /// Returns the [`EngineApiClient`] interface for the authenticated engine API. /// /// This will send authenticated http requests to the node's auth server. - pub fn engine_http_client(&self) -> impl EngineApiClient<Engine> { + pub fn engine_http_client(&self) -> impl EngineApiClient<Engine> + use<Engine, Node, AddOns> { self.auth_server_handle().http_client() }
diff --git reth/crates/node/builder/src/rpc.rs scroll-reth/crates/node/builder/src/rpc.rs index 70adcc83d6934c6216c391f80d7783b353ee9930..774f508c549ae59f1ffcee151dfae524c26762ce 100644 --- reth/crates/node/builder/src/rpc.rs +++ scroll-reth/crates/node/builder/src/rpc.rs @@ -385,6 +385,20 @@ &self.engine_events } }   +/// Trait to provide access to the RPC handle. +pub trait RpcHandleProvider<Node: FullNodeComponents, EthApi: EthApiTypes> { + /// Returns the rpc server handles. + fn rpc_handle(&self) -> &RpcHandle<Node, EthApi>; +} + +impl<Node: FullNodeComponents, EthApi: EthApiTypes> RpcHandleProvider<Node, EthApi> + for RpcHandle<Node, EthApi> +{ + fn rpc_handle(&self) -> &Self { + self + } +} + /// Handle returned when only the regular RPC server (HTTP/WS/IPC) is launched. /// /// This handle provides access to the RPC server endpoints and registry, but does not @@ -494,7 +508,7 @@ > { /// Additional RPC add-ons. pub hooks: RpcHooks<Node, EthB::EthApi>, /// Builder for `EthApi` - eth_api_builder: EthB, + pub eth_api_builder: EthB, /// Payload validator builder payload_validator_builder: PVB, /// Builder for `EngineApi` @@ -1103,8 +1117,9 @@ }   /// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher /// implementations. -pub trait RethRpcAddOns<N: FullNodeComponents>: - NodeAddOns<N, Handle = RpcHandle<N, Self::EthApi>> +pub trait RethRpcAddOns<N: FullNodeComponents>: NodeAddOns<N> +where + Self::Handle: RpcHandleProvider<N, Self::EthApi>, { /// eth API implementation. type EthApi: EthApiTypes;
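The `RpcHandleProvider` trait above is what lets add-ons produce a handle other than `RpcHandle` itself and still satisfy the relaxed `RethRpcAddOns` bound at the bottom of the hunk: the launchers only need a way to reach the underlying `RpcHandle`. A hedged sketch of a custom add-ons handle; `MyAddOnsHandle` is hypothetical, only the trait and `RpcHandle` come from the diff:

```rust
use reth_node_api::FullNodeComponents;
use reth_node_builder::rpc::{RpcHandle, RpcHandleProvider};
use reth_rpc_eth_api::EthApiTypes;

/// Hypothetical add-ons handle that wraps the stock `RpcHandle` plus extra state.
struct MyAddOnsHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
    inner: RpcHandle<Node, EthApi>,
    // chain-specific handles could live here
}

impl<Node: FullNodeComponents, EthApi: EthApiTypes> RpcHandleProvider<Node, EthApi>
    for MyAddOnsHandle<Node, EthApi>
{
    fn rpc_handle(&self) -> &RpcHandle<Node, EthApi> {
        &self.inner
    }
}
```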
diff --git reth/crates/node/core/Cargo.toml scroll-reth/crates/node/core/Cargo.toml index 2240fa98837726ba849ed15c61afdd3fb610a5c9..bf784b5070366d0263a122a11bd7c5ea12511ef3 100644 --- reth/crates/node/core/Cargo.toml +++ scroll-reth/crates/node/core/Cargo.toml @@ -77,6 +77,8 @@ [features] # Features for vergen to generate correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] +# Feature to enable opentelemetry export +otlp = ["reth-tracing/otlp"]   [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] }
diff --git reth/crates/node/core/src/args/engine.rs scroll-reth/crates/node/core/src/args/engine.rs index 88179a6b40e0a71377432c0ce796e9cafb8a363c..6b678b5789bb7d0c784f47f72e86b47eb651c5e0 100644 --- reth/crates/node/core/src/args/engine.rs +++ scroll-reth/crates/node/core/src/args/engine.rs @@ -108,6 +108,16 @@ /// Allow unwinding canonical header to ancestor during forkchoice updates. /// See `TreeConfig::unwind_canonical_header` for more details. #[arg(long = "engine.allow-unwind-canonical-header", default_value = "false")] pub allow_unwind_canonical_header: bool, + + /// Configure the number of storage proof workers in the Tokio blocking pool. + /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. + #[arg(long = "engine.storage-worker-count")] + pub storage_worker_count: Option<usize>, + + /// Configure the number of account proof workers in the Tokio blocking pool. + /// If not specified, defaults to the same count as storage workers. + #[arg(long = "engine.account-worker-count")] + pub account_worker_count: Option<usize>, }   #[allow(deprecated)] @@ -134,6 +144,8 @@ precompile_cache_disabled: false, state_root_fallback: false, always_process_payload_attributes_on_canonical_head: false, allow_unwind_canonical_header: false, + storage_worker_count: None, + account_worker_count: None, } } } @@ -141,7 +153,7 @@ impl EngineArgs { /// Creates a [`TreeConfig`] from the engine arguments. pub fn tree_config(&self) -> TreeConfig { - TreeConfig::default() + let mut config = TreeConfig::default() .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) @@ -159,7 +171,17 @@ .with_state_root_fallback(self.state_root_fallback) .with_always_process_payload_attributes_on_canonical_head( self.always_process_payload_attributes_on_canonical_head, ) - .with_unwind_canonical_header(self.allow_unwind_canonical_header) + .with_unwind_canonical_header(self.allow_unwind_canonical_header); + + if let Some(count) = self.storage_worker_count { + config = config.with_storage_worker_count(count); + } + + if let Some(count) = self.account_worker_count { + config = config.with_account_worker_count(count); + } + + config } }
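A hedged usage sketch for the new worker-count flags: the `Cli` wrapper is illustrative and assumes `EngineArgs` can be flattened with clap (it is flattened into reth's node command); flags that are left unset keep the defaults described in the help text above.

```rust
use clap::Parser;
use reth_node_core::args::EngineArgs;

#[derive(Debug, Parser)]
struct Cli {
    #[command(flatten)]
    engine: EngineArgs,
}

fn main() {
    let cli = Cli::parse_from([
        "reth",
        "--engine.storage-worker-count", "16",
        "--engine.account-worker-count", "8",
    ]);

    assert_eq!(cli.engine.storage_worker_count, Some(16));
    assert_eq!(cli.engine.account_worker_count, Some(8));

    // Forwarded into the tree config via the builder methods shown above.
    let _tree_config = cli.engine.tree_config();
}
```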
diff --git reth/crates/node/core/src/args/log.rs scroll-reth/crates/node/core/src/args/log.rs index 1236984fac02ea9a2fe25847959751ee10172ea2..99fefc11445cbb4c9ce7f1c308f0c0da4d1fece5 100644 --- reth/crates/node/core/src/args/log.rs +++ scroll-reth/crates/node/core/src/args/log.rs @@ -70,6 +70,7 @@ global = true, default_value_t = ColorMode::Always )] pub color: ColorMode, + /// The verbosity settings for the tracer. #[command(flatten)] pub verbosity: Verbosity,
diff --git reth/crates/node/core/src/args/metric.rs scroll-reth/crates/node/core/src/args/metric.rs new file mode 100644 index 0000000000000000000000000000000000000000..d46018b8e774d137b1ae9bb868d19d802c0843c8 --- /dev/null +++ scroll-reth/crates/node/core/src/args/metric.rs @@ -0,0 +1,13 @@ +use clap::Parser; +use reth_cli_util::parse_socket_address; +use std::net::SocketAddr; + +/// Metrics configuration. +#[derive(Debug, Clone, Default, Parser)] +pub struct MetricArgs { + /// Enable Prometheus metrics. + /// + /// The metrics will be served at the given interface and port. + #[arg(long="metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS", value_parser = parse_socket_address, help_heading = "Metrics")] + pub prometheus: Option<SocketAddr>, +}
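A short usage sketch for the new `MetricArgs` (argument values are illustrative): `--metrics`, or its alias `--metrics.prometheus`, parses into an optional socket address, and omitting the flag leaves metrics disabled.

```rust
use clap::Parser;
use reth_node_core::args::MetricArgs;

fn main() {
    let enabled = MetricArgs::parse_from(["reth", "--metrics", "127.0.0.1:9001"]);
    assert_eq!(enabled.prometheus.map(|addr| addr.port()), Some(9001));

    // Without the flag, no Prometheus endpoint is configured.
    let disabled = MetricArgs::parse_from(["reth"]);
    assert!(disabled.prometheus.is_none());
}
```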
diff --git reth/crates/node/core/src/args/mod.rs scroll-reth/crates/node/core/src/args/mod.rs index 6799fe418dcb045b6f3b48c5a22066cd4206afe2..54e777401469cfc33870c5ea4694ca801c22ae9f 100644 --- reth/crates/node/core/src/args/mod.rs +++ scroll-reth/crates/node/core/src/args/mod.rs @@ -24,6 +24,14 @@ /// LogArgs struct for configuring the logger mod log; pub use log::{ColorMode, LogArgs, Verbosity};   +/// `TraceArgs` for tracing and spans support +mod trace; +pub use trace::TraceArgs; + +/// `MetricArgs` to configure metrics. +mod metric; +pub use metric::MetricArgs; + /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs;
diff --git reth/crates/node/core/src/args/network.rs scroll-reth/crates/node/core/src/args/network.rs index a32f14edd41ef6c3729852a18294b479102375e0..52ff52b1cee196f2cd1767c1308d832eb529cfa3 100644 --- reth/crates/node/core/src/args/network.rs +++ scroll-reth/crates/node/core/src/args/network.rs @@ -184,6 +184,10 @@ /// Comma separated list of required block hashes. /// Peers that don't have these blocks will be filtered out. #[arg(long = "required-block-hashes", value_delimiter = ',')] pub required_block_hashes: Vec<B256>, + + /// Optional network ID to override the chain specification's network ID for P2P connections + #[arg(long)] + pub network_id: Option<u64>, }   impl NetworkArgs { @@ -297,6 +301,7 @@ self.discovery.port, )) .disable_tx_gossip(self.disable_tx_gossip) .required_block_hashes(self.required_block_hashes.clone()) + .network_id(self.network_id) }   /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -371,6 +376,7 @@ tx_propagation_policy: TransactionPropagationKind::default(), disable_tx_gossip: false, propagation_mode: TransactionPropagationMode::Sqrt, required_block_hashes: vec![], + network_id: None, } } }
diff --git reth/crates/node/core/src/args/payload_builder.rs scroll-reth/crates/node/core/src/args/payload_builder.rs index f751bcc070ccf6629d9af6fcaf5d7b3959df6da0..d658241c21c9c4c06b7d7f143c045204fc209a8b 100644 --- reth/crates/node/core/src/args/payload_builder.rs +++ scroll-reth/crates/node/core/src/args/payload_builder.rs @@ -5,7 +5,7 @@ use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; -use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; +use reth_cli_util::parse_duration_from_secs_or_ms; use std::{borrow::Cow, ffi::OsStr, time::Duration};   /// Parameters for configuring the Payload Builder @@ -29,7 +29,7 @@ #[arg(long = "builder.interval", value_parser = parse_duration_from_secs_or_ms, default_value = "1", value_name = "DURATION")] pub interval: Duration,   /// The deadline for when the payload builder job should resolve. - #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")] + #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs_or_ms, default_value = "12s", value_name = "DEADLINE")] pub deadline: Duration,   /// Maximum number of tasks to spawn for building a payload.
diff --git reth/crates/node/core/src/args/trace.rs scroll-reth/crates/node/core/src/args/trace.rs new file mode 100644 index 0000000000000000000000000000000000000000..751ab556ac85321a2d7a33d6977ff0e57b2b5b53 --- /dev/null +++ scroll-reth/crates/node/core/src/args/trace.rs @@ -0,0 +1,61 @@ +//! Opentelemetry tracing configuration through CLI args. + +use clap::Parser; +use eyre::{ensure, WrapErr}; +use tracing::Level; +use url::Url; + +/// CLI arguments for configuring `Opentelemetry` trace and span export. +#[derive(Debug, Clone, Parser)] +pub struct TraceArgs { + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// + /// If no value provided, defaults to `http://localhost:4318/v1/traces`. + /// + /// Example: --tracing-otlp=http://collector:4318/v1/traces + #[arg( + long = "tracing-otlp", + global = true, + value_name = "URL", + num_args = 0..=1, + default_missing_value = "http://localhost:4318/v1/traces", + require_equals = true, + value_parser = parse_otlp_endpoint, + help_heading = "Tracing" + )] + pub otlp: Option<Url>, + + /// Set the minimum log level for OTLP traces. + /// + /// Valid values: ERROR, WARN, INFO, DEBUG, TRACE + /// + /// Defaults to TRACE if not specified. + #[arg( + long = "tracing-otlp-level", + global = true, + value_name = "LEVEL", + default_value = "TRACE", + help_heading = "Tracing" + )] + pub otlp_level: Level, +} + +impl Default for TraceArgs { + fn default() -> Self { + Self { otlp: None, otlp_level: Level::TRACE } + } +} + +// Parses and validates an OTLP endpoint url. +fn parse_otlp_endpoint(arg: &str) -> eyre::Result<Url> { + let url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + + // OTLP url must end with `/v1/traces` per the OTLP specification. + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(url) +}
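A hedged sketch of how the `--tracing-otlp` flag above behaves (endpoint values are illustrative): the bare flag falls back to the default local collector, an explicit endpoint must be passed with `=` because of `require_equals`, and the value parser rejects URLs whose path does not end in `/v1/traces`.

```rust
use clap::Parser;
use reth_node_core::args::TraceArgs;

fn main() {
    // Bare flag: default collector endpoint, default TRACE level.
    let args = TraceArgs::parse_from(["reth", "--tracing-otlp"]);
    assert_eq!(
        args.otlp.as_ref().map(|url| url.as_str()),
        Some("http://localhost:4318/v1/traces")
    );

    // Explicit endpoint (note the `=` required by `require_equals`).
    let args = TraceArgs::parse_from(["reth", "--tracing-otlp=http://collector:4318/v1/traces"]);
    assert!(args.otlp.is_some());

    // Rejected: the path does not end in /v1/traces.
    let err = TraceArgs::try_parse_from(["reth", "--tracing-otlp=http://collector:4318/metrics"]);
    assert!(err.is_err());
}
```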
diff --git reth/crates/node/core/src/node_config.rs scroll-reth/crates/node/core/src/node_config.rs index 96fa8cc8dfa0029c348c56626a4945a9bf455624..94dbecb649cda633497835f0ae8fb456bc38d245 100644 --- reth/crates/node/core/src/node_config.rs +++ scroll-reth/crates/node/core/src/node_config.rs @@ -27,20 +27,16 @@ use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, - net::SocketAddr, path::{Path, PathBuf}, sync::Arc, }; use tracing::*;   -use crate::args::EraArgs; +use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_RESERVED_CPU_CORES, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; - -/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2;   /// Default size of cross-block cache in megabytes. pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024; @@ -103,10 +99,8 @@ /// /// Possible values are either a built-in chain or the path to a chain specification file. pub chain: Arc<ChainSpec>,   - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - pub metrics: Option<SocketAddr>, + /// Enable to configure metrics export to endpoints + pub metrics: MetricArgs,   /// Add a new instance of a node. /// @@ -171,7 +165,7 @@ pub fn new(chain: Arc<ChainSpec>) -> Self { Self { config: None, chain, - metrics: None, + metrics: MetricArgs::default(), instance: None, network: NetworkArgs::default(), rpc: RpcServerArgs::default(), @@ -225,8 +219,8 @@ self }   /// Set the metrics address for the node - pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self { - self.metrics = Some(metrics); + pub const fn with_metrics(mut self, metrics: MetricArgs) -> Self { + self.metrics = metrics; self }   @@ -517,7 +511,7 @@ fn clone(&self) -> Self { Self { chain: self.chain.clone(), config: self.config.clone(), - metrics: self.metrics, + metrics: self.metrics.clone(), instance: self.instance, network: self.network.clone(), rpc: self.rpc.clone(),
diff --git reth/crates/payload/builder/src/service.rs scroll-reth/crates/payload/builder/src/service.rs index f9530d003f552ce0f99cc27a5911944a5aee789d..f3f1b03ab2efd36f9d4f4a633329cad29d68bf11 100644 --- reth/crates/payload/builder/src/service.rs +++ scroll-reth/crates/payload/builder/src/service.rs @@ -512,7 +512,7 @@ Self::BestPayload(f0, f1) => { f.debug_tuple("BestPayload").field(&f0).field(&f1).finish() } Self::PayloadTimestamp(f0, f1) => { - f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() + f.debug_tuple("PayloadTimestamp").field(&f0).field(&f1).finish() } Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(),
diff --git reth/crates/payload/primitives/Cargo.toml scroll-reth/crates/payload/primitives/Cargo.toml index 670727e3c6d7302050611ddcde385dce0a335db0..e1b2bb61793a48d0f92f8a7f7e0767bc48e2cd71 100644 --- reth/crates/payload/primitives/Cargo.toml +++ scroll-reth/crates/payload/primitives/Cargo.toml @@ -23,6 +23,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } op-alloy-rpc-types-engine = { workspace = true, optional = true, features = ["serde"] } +scroll-alloy-rpc-types-engine = { workspace = true, optional = true, features = ["serde"] }   # misc auto_impl.workspace = true @@ -46,8 +47,10 @@ "serde/std", "thiserror/std", "reth-primitives-traits/std", "either/std", + "scroll-alloy-rpc-types-engine?/std", ] op = [ "dep:op-alloy-rpc-types-engine", "reth-primitives-traits/op", ] +scroll-alloy-traits = ["dep:scroll-alloy-rpc-types-engine", "reth-primitives-traits/scroll-alloy-traits"]
diff --git reth/crates/payload/primitives/src/traits.rs scroll-reth/crates/payload/primitives/src/traits.rs index 39bd14cc63b04f52d73f8e0244e4807ab3a49eb8..70007ef200ea91912e873de0d9c148b1216440a6 100644 --- reth/crates/payload/primitives/src/traits.rs +++ scroll-reth/crates/payload/primitives/src/traits.rs @@ -137,6 +137,21 @@ self.payload_attributes.parent_beacon_block_root } }   +#[cfg(feature = "scroll-alloy-traits")] +impl PayloadAttributes for scroll_alloy_rpc_types_engine::ScrollPayloadAttributes { + fn timestamp(&self) -> u64 { + self.payload_attributes.timestamp + } + + fn withdrawals(&self) -> Option<&Vec<Withdrawal>> { + self.payload_attributes.withdrawals.as_ref() + } + + fn parent_beacon_block_root(&self) -> Option<B256> { + self.payload_attributes.parent_beacon_block_root + } +} + /// Factory trait for creating payload attributes. /// /// Enables different strategies for generating payload attributes based on
diff --git reth/crates/ress/provider/src/lib.rs scroll-reth/crates/ress/provider/src/lib.rs index 599b37962f0107a5a196d818e580c78355afe688..da3c5190902d34f4d0b84a599d1e5726b2b240d5 100644 --- reth/crates/ress/provider/src/lib.rs +++ scroll-reth/crates/ress/provider/src/lib.rs @@ -150,7 +150,7 @@ // We allow block execution to fail, since we still want to record all accessed state by // invalid blocks. if let Err(error) = self.evm_config.batch_executor(&mut db).execute_with_state_closure( &block, - |state: &State<_>| { + |state: &mut State<_>| { record.record_executed_state(state); }, ) {
diff --git reth/crates/rpc/rpc-builder/src/auth.rs scroll-reth/crates/rpc/rpc-builder/src/auth.rs index 777081a7e6f3dcc3f64a0242393e98e2365dfa3f..0d0a6165ff75a1c2f966acb12b84d02810b30afa 100644 --- reth/crates/rpc/rpc-builder/src/auth.rs +++ scroll-reth/crates/rpc/rpc-builder/src/auth.rs @@ -354,7 +354,9 @@ /// Returns a http client connected to the server. /// /// This client uses the JWT token to authenticate requests. - pub fn http_client(&self) -> impl SubscriptionClientT + Clone + Send + Sync + Unpin + 'static { + pub fn http_client( + &self, + ) -> impl SubscriptionClientT + use<> + Clone + Send + Sync + Unpin + 'static { // Create a middleware that adds a new JWT token to every request. let secret_layer = AuthClientLayer::new(self.secret); let middleware = tower::ServiceBuilder::default().layer(secret_layer);
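The `+ use<>` here (and the `+ use<Engine, Node, AddOns>` in the node builder hunk above) is Rust's precise-capturing syntax: it pins down exactly which generics and lifetimes the returned `impl Trait` captures, so the returned client does not keep `&self` borrowed. A standalone illustration, unrelated to the reth types:

```rust
struct Registry {
    len: usize,
}

impl Registry {
    /// `use<>` states that the returned iterator captures no generic
    /// lifetimes, so it does not keep `&self` borrowed.
    fn indices(&self) -> impl Iterator<Item = usize> + use<> {
        0..self.len
    }
}

fn main() {
    // The temporary `Registry` can be dropped immediately because the
    // iterator does not capture its lifetime.
    let iter = Registry { len: 3 }.indices();
    assert_eq!(iter.collect::<Vec<_>>(), vec![0, 1, 2]);
}
```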
diff --git reth/crates/rpc/rpc-convert/Cargo.toml scroll-reth/crates/rpc/rpc-convert/Cargo.toml index af43e9c54a2063495de696333554dbfa0bf6257b..18a5243d769d0347be027f18ea4d55e27a070c4d 100644 --- reth/crates/rpc/rpc-convert/Cargo.toml +++ scroll-reth/crates/rpc/rpc-convert/Cargo.toml @@ -33,6 +33,13 @@ op-alloy-network = { workspace = true, optional = true } reth-optimism-primitives = { workspace = true, optional = true } op-revm = { workspace = true, optional = true }   +# scroll +scroll-alloy-consensus = { workspace = true, optional = true } +scroll-alloy-evm = { workspace = true, optional = true } +scroll-alloy-rpc-types = { workspace = true, optional = true } +reth-scroll-primitives = { workspace = true, optional = true } +revm-scroll = { workspace = true, optional = true } + # revm revm-context.workspace = true   @@ -60,3 +67,13 @@ "dep:op-revm", "reth-evm/op", "reth-primitives-traits/op", ] +scroll = [ + "dep:scroll-alloy-consensus", + "dep:scroll-alloy-evm", + "dep:scroll-alloy-rpc-types", + "dep:reth-scroll-primitives", + "dep:reth-storage-api", + "dep:revm-scroll", + "reth-evm/scroll-alloy-traits", + "reth-primitives-traits/scroll-alloy-traits", +]
diff --git reth/crates/rpc/rpc-convert/src/lib.rs scroll-reth/crates/rpc/rpc-convert/src/lib.rs index 9844b17b60435af6f89caea9a95948f77fd7660a..c1a1c457cb86cfd50c00ff4d3dfc5a1c73ee3a20 100644 --- reth/crates/rpc/rpc-convert/src/lib.rs +++ scroll-reth/crates/rpc/rpc-convert/src/lib.rs @@ -27,3 +27,6 @@ };   #[cfg(feature = "op")] pub use transaction::op::*; + +#[cfg(feature = "scroll")] +pub use transaction::scroll::*;
diff --git reth/crates/rpc/rpc-convert/src/rpc.rs scroll-reth/crates/rpc/rpc-convert/src/rpc.rs index cf67bc11add7471d7124e69c5a0fe94b78e58079..38cc00c83097af0256e826fc3e265eec14e1273d 100644 --- reth/crates/rpc/rpc-convert/src/rpc.rs +++ scroll-reth/crates/rpc/rpc-convert/src/rpc.rs @@ -100,3 +100,35 @@ Ok(tx.into_signed(signature).into()) } } + +#[cfg(feature = "scroll")] +impl SignableTxRequest<scroll_alloy_consensus::ScrollTxEnvelope> + for scroll_alloy_rpc_types::ScrollTransactionRequest +{ + async fn try_build_and_sign( + self, + signer: impl TxSigner<Signature> + Send, + ) -> Result<scroll_alloy_consensus::ScrollTxEnvelope, SignTxRequestError> { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + scroll_alloy_consensus::ScrollTypedTransaction::Legacy(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Legacy(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip2930(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip2930(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip1559(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip1559(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip7702(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip7702(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::L1Message(_) => { + return Err(SignTxRequestError::InvalidTransactionRequest); + } + }; + Ok(signed) + } +}
diff --git reth/crates/rpc/rpc-convert/src/transaction.rs scroll-reth/crates/rpc/rpc-convert/src/transaction.rs index b8fb25c66c4f39893d7ffb1f0de4dfcbc895ca5f..ccab1bbbe99fadc7ca59969c381836720f7b5c90 100644 --- reth/crates/rpc/rpc-convert/src/transaction.rs +++ scroll-reth/crates/rpc/rpc-convert/src/transaction.rs @@ -17,7 +17,7 @@ use core::error; use dyn_clone::DynClone; use reth_evm::{ revm::context_interface::{either::Either, Block}, - ConfigureEvm, SpecFor, TxEnvFor, + BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor, }; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, SealedBlock, SealedHeader, SealedHeaderFor, TransactionMeta, @@ -123,18 +123,15 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. type Primitives: NodePrimitives;   + /// The EVM configuration. + type Evm: ConfigureEvm<Primitives = Self::Primitives>; + /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug;   - /// A set of variables for executing a transaction. - type TxEnv; - /// An associated RPC conversion error. type Error: error::Error + Into<jsonrpsee_types::ErrorObject<'static>>; - - /// The EVM specification identifier. - type Spec;   /// Wrapper for `fill()` with default `TransactionInfo` /// Create a new rpc transaction result for a _pending_ signed transaction, setting block @@ -169,9 +166,8 @@ /// `cfg_env` and `block_env`. fn tx_env( &self, request: RpcTxReq<Self::Network>, - cfg_env: &CfgEnv<Self::Spec>, - block_env: &BlockEnv, - ) -> Result<Self::TxEnv, Self::Error>; + evm_env: &EvmEnvFor<Self::Evm>, + ) -> Result<TxEnvFor<Self::Evm>, Self::Error>;   /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all /// receipts are from the same block. @@ -199,8 +195,8 @@ ) -> Result<RpcHeader<Self::Network>, Self::Error>; }   dyn_clone::clone_trait_object!( - <Primitives, Network, Error, TxEnv, Spec> - RpcConvert<Primitives = Primitives, Network = Network, Error = Error, TxEnv = TxEnv, Spec = Spec> + <Primitives, Network, Error, Evm> + RpcConvert<Primitives = Primitives, Network = Network, Error = Error, Evm = Evm> );   /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -439,7 +435,7 @@ /// One should prefer to implement [`TryIntoTxEnv`] for `TxReq` to get the `TxEnvConverter` /// implementation for free, thanks to the blanket implementation, unless the conversion requires /// more context. For example, some configuration parameters or access handles to database, network, /// etc. -pub trait TxEnvConverter<TxReq, TxEnv, Spec>: +pub trait TxEnvConverter<TxReq, Evm: ConfigureEvm>: Debug + Send + Sync + Unpin + Clone + 'static { /// An associated error that can occur during conversion. @@ -451,31 +447,30 @@ /// See [`TxEnvConverter`] for more information. 
fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv<Spec>, - block_env: &BlockEnv, - ) -> Result<TxEnv, Self::Error>; + evm_env: &EvmEnvFor<Evm>, + ) -> Result<TxEnvFor<Evm>, Self::Error>; }   -impl<TxReq, TxEnv, Spec> TxEnvConverter<TxReq, TxEnv, Spec> for () +impl<TxReq, Evm> TxEnvConverter<TxReq, Evm> for () where - TxReq: TryIntoTxEnv<TxEnv>, + TxReq: TryIntoTxEnv<TxEnvFor<Evm>, BlockEnvFor<Evm>>, + Evm: ConfigureEvm, { type Error = TxReq::Err;   fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv<Spec>, - block_env: &BlockEnv, - ) -> Result<TxEnv, Self::Error> { - tx_req.try_into_tx_env(cfg_env, block_env) + evm_env: &EvmEnvFor<Evm>, + ) -> Result<TxEnvFor<Evm>, Self::Error> { + tx_req.try_into_tx_env(&evm_env.cfg_env, &evm_env.block_env) } }   /// Converts rpc transaction requests into transaction environment using a closure. -impl<F, TxReq, TxEnv, E, Spec> TxEnvConverter<TxReq, TxEnv, Spec> for F +impl<F, TxReq, E, Evm> TxEnvConverter<TxReq, Evm> for F where - F: Fn(TxReq, &CfgEnv<Spec>, &BlockEnv) -> Result<TxEnv, E> + F: Fn(TxReq, &EvmEnvFor<Evm>) -> Result<TxEnvFor<Evm>, E> + Debug + Send + Sync @@ -483,6 +478,7 @@ + Unpin + Clone + 'static, TxReq: Clone, + Evm: ConfigureEvm, E: error::Error + Send + Sync + 'static, { type Error = E; @@ -490,17 +486,16 @@ fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv<Spec>, - block_env: &BlockEnv, - ) -> Result<TxEnv, Self::Error> { - self(tx_req, cfg_env, block_env) + evm_env: &EvmEnvFor<Evm>, + ) -> Result<TxEnvFor<Evm>, Self::Error> { + self(tx_req, evm_env) } }   /// Converts `self` into `T`. /// /// Should create an executable transaction environment using [`TransactionRequest`]. -pub trait TryIntoTxEnv<T> { +pub trait TryIntoTxEnv<T, BlockEnv = reth_evm::revm::context::BlockEnv> { /// An associated error that can occur during the conversion. type Err;   @@ -836,7 +831,6 @@ } }   /// Converts `self` into a boxed converter. - #[expect(clippy::type_complexity)] pub fn erased( self, ) -> Box< @@ -844,8 +838,7 @@ dyn RpcConvert< Primitives = <Self as RpcConvert>::Primitives, Network = <Self as RpcConvert>::Network, Error = <Self as RpcConvert>::Error, - TxEnv = <Self as RpcConvert>::TxEnv, - Spec = <Self as RpcConvert>::Spec, + Evm = <Self as RpcConvert>::Evm, >, > where @@ -933,13 +926,12 @@ Map: TxInfoMapper<TxTy<N>> + Clone + Debug + Unpin + Send + Sync + 'static, SimTx: SimTxConverter<RpcTxReq<Network>, TxTy<N>>, RpcTx: RpcTxConverter<TxTy<N>, Network::TransactionResponse, <Map as TxInfoMapper<TxTy<N>>>::Out>, - TxEnv: TxEnvConverter<RpcTxReq<Network>, TxEnvFor<Evm>, SpecFor<Evm>>, + TxEnv: TxEnvConverter<RpcTxReq<Network>, Evm>, { type Primitives = N; + type Evm = Evm; type Network = Network; - type TxEnv = TxEnvFor<Evm>; type Error = Receipt::Error; - type Spec = SpecFor<Evm>;   fn fill( &self, @@ -965,10 +957,9 @@ fn tx_env( &self, request: RpcTxReq<Network>, - cfg_env: &CfgEnv<SpecFor<Evm>>, - block_env: &BlockEnv, - ) -> Result<Self::TxEnv, Self::Error> { - self.tx_env_converter.convert_tx_env(request, cfg_env, block_env).map_err(Into::into) + evm_env: &EvmEnvFor<Evm>, + ) -> Result<TxEnvFor<Evm>, Self::Error> { + self.tx_env_converter.convert_tx_env(request, evm_env).map_err(Into::into) }   fn convert_receipts( @@ -992,6 +983,83 @@ header: SealedHeaderFor<Self::Primitives>, block_size: usize, ) -> Result<RpcHeader<Self::Network>, Self::Error> { Ok(self.header_converter.convert_header(header, block_size)?) + } +} + +/// Scroll specific RPC transaction compatibility implementations. 
+#[cfg(feature = "scroll")] +pub mod scroll { + use super::*; + use alloy_consensus::{transaction::TxHashRef, SignableTransaction}; + use alloy_primitives::{Address, Bytes, Signature}; + use reth_scroll_primitives::ScrollReceipt; + use reth_storage_api::{errors::ProviderError, ReceiptProvider}; + use revm_scroll::l1block::TX_L1_FEE_PRECISION_U256; + use scroll_alloy_consensus::{ScrollAdditionalInfo, ScrollTransactionInfo, ScrollTxEnvelope}; + use scroll_alloy_rpc_types::ScrollTransactionRequest; + + /// Creates [`ScrollTransactionInfo`] by adding [`ScrollAdditionalInfo`] to [`TransactionInfo`] + /// if `tx` is not a L1 message. + pub fn try_into_scroll_tx_info<T: ReceiptProvider<Receipt = ScrollReceipt>>( + provider: &T, + tx: &ScrollTxEnvelope, + tx_info: TransactionInfo, + ) -> Result<ScrollTransactionInfo, ProviderError> { + let additional_info = if tx.is_l1_message() { + None + } else { + provider + .receipt_by_hash(*tx.tx_hash())? + .map(|receipt| ScrollAdditionalInfo { l1_fee: receipt.l1_fee() }) + } + .unwrap_or_default(); + + Ok(ScrollTransactionInfo::new(tx_info, additional_info)) + } + + impl FromConsensusTx<ScrollTxEnvelope> for scroll_alloy_rpc_types::Transaction { + type TxInfo = ScrollTransactionInfo; + type Err = Infallible; + + fn from_consensus_tx( + tx: ScrollTxEnvelope, + signer: Address, + tx_info: Self::TxInfo, + ) -> Result<Self, Self::Err> { + Ok(Self::from_transaction(Recovered::new_unchecked(tx, signer), tx_info)) + } + } + + impl TryIntoSimTx<ScrollTxEnvelope> for ScrollTransactionRequest { + fn try_into_sim_tx(self) -> Result<ScrollTxEnvelope, ValueError<Self>> { + let tx = self + .build_typed_tx() + .map_err(|request| ValueError::new(request, "Required fields missing"))?; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + + Ok(tx.into_signed(signature).into()) + } + } + + impl TryIntoTxEnv<scroll_alloy_evm::ScrollTransactionIntoTxEnv<TxEnv>> + for ScrollTransactionRequest + { + type Err = EthTxEnvError; + + fn try_into_tx_env<Spec>( + self, + cfg_env: &CfgEnv<Spec>, + block_env: &BlockEnv, + ) -> Result<scroll_alloy_evm::ScrollTransactionIntoTxEnv<TxEnv>, Self::Err> { + Ok(scroll_alloy_evm::ScrollTransactionIntoTxEnv::new( + self.as_ref().clone().try_into_tx_env(cfg_env, block_env)?, + Some(Bytes::new()), + Some(TX_L1_FEE_PRECISION_U256), + Some(0), + )) + } } }
diff --git reth/crates/rpc/rpc-eth-api/Cargo.toml scroll-reth/crates/rpc/rpc-eth-api/Cargo.toml index 44637d1931c329f281744301250fcd7fdd2bc91f..3ea12d16a6397fbd3fb13722822a0658a278b3f8 100644 --- reth/crates/rpc/rpc-eth-api/Cargo.toml +++ scroll-reth/crates/rpc/rpc-eth-api/Cargo.toml @@ -31,6 +31,9 @@ reth-network-api.workspace = true reth-node-api.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] }   +# scroll +reth-scroll-evm = { workspace = true, optional = true } + # ethereum alloy-evm = { workspace = true, features = ["overrides", "call-util"] } alloy-rlp.workspace = true @@ -68,3 +71,4 @@ "reth-primitives-traits/op", "reth-rpc-convert/op", "alloy-evm/op", ] +scroll = ["reth-scroll-evm", "reth-rpc-convert/scroll"]
diff --git reth/crates/rpc/rpc-eth-api/src/helpers/call.rs scroll-reth/crates/rpc/rpc-eth-api/src/helpers/call.rs index b96dab882a0ca43cb1cb983d92f8ddbf7d675012..8f325e757f1fa17ce7cbfdfe6599b710539054fa 100644 --- reth/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ scroll-reth/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,8 +20,8 @@ }; use futures::Future; use reth_errors::{ProviderError, RethError}; use reth_evm::{ - ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, - TxEnvFor, + env::BlockEnvironment, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, InspectorFor, + TransactionEnv, TxEnvFor, }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; @@ -38,6 +38,7 @@ EthApiError, RevertError, StateCacheDb, }; use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ + context::Block, context_interface::{ result::{ExecutionResult, ResultAndState}, Transaction, @@ -115,7 +116,7 @@ // If not explicitly required, we disable nonce check <https://github.com/paradigmxyz/reth/issues/16108> evm_env.cfg_env.disable_nonce_check = true; evm_env.cfg_env.disable_base_fee = true; evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; }   let SimBlock { block_overrides, state_overrides, calls } = block; @@ -123,19 +124,23 @@ if let Some(block_overrides) = block_overrides { // ensure we don't allow uncapped gas limit per block if let Some(gas_limit_override) = block_overrides.gas_limit && - gas_limit_override > evm_env.block_env.gas_limit && + gas_limit_override > evm_env.block_env.gas_limit() && gas_limit_override > this.call_gas_limit() { return Err(EthApiError::other(EthSimulateError::GasLimitReached).into()) } - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides( + block_overrides, + &mut db, + evm_env.block_env.inner_mut(), + ); } if let Some(state_overrides) = state_overrides { apply_state_overrides(state_overrides, &mut db) .map_err(Self::Error::from_eth_err)?; }   - let block_gas_limit = evm_env.block_env.gas_limit; + let block_gas_limit = evm_env.block_env.gas_limit(); let chain_id = evm_env.cfg_env.chain_id;   let default_gas_limit = { @@ -404,7 +409,7 @@ if request.as_ref().gas_limit().is_none() && tx_env.gas_price() > 0 { let cap = this.caller_gas_allowance(&mut db, &evm_env, &tx_env)?; // no gas limit was provided in the request, so we need to cap the request's gas // limit - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); }   // can consume the list since we're not using the request anymore @@ -461,7 +466,7 @@ /// Executes code on state. pub trait Call: LoadState< - RpcConvert: RpcConvert<TxEnv = TxEnvFor<Self::Evm>, Spec = SpecFor<Self::Evm>>, + RpcConvert: RpcConvert<Evm = Self::Evm>, Error: FromEvmError<Self::Evm> + From<<Self::RpcConvert as RpcConvert>::Error> + From<ProviderError>, @@ -520,7 +525,7 @@ Ok(res) }   - /// Executes the [`EvmEnv`] against the given [Database] without committing state + /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. fn transact_with_inspector<DB, I>( &self, @@ -574,7 +579,7 @@ /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. 
/// - /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at + /// This returns the configured [`reth_evm::EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. @@ -712,10 +717,10 @@ }   /// /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are - /// `None`, they fall back to the [`EvmEnv`]'s settings. + /// `None`, they fall back to the [`reth_evm::EvmEnv`]'s settings. fn create_txn_env( &self, - evm_env: &EvmEnv<SpecFor<Self::Evm>>, + evm_env: &EvmEnvFor<Self::Evm>, mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>, mut db: impl Database<Error: Into<EthApiError>>, ) -> Result<TxEnvFor<Self::Evm>, Self::Error> { @@ -728,10 +733,10 @@ .unwrap_or_default(); request.as_mut().set_nonce(nonce); }   - Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, evm_env)?) }   - /// Prepares the [`EvmEnv`] for execution of calls. + /// Prepares the [`reth_evm::EvmEnv`] for execution of calls. /// /// Does not commit any changes to the underlying database. /// @@ -790,7 +795,7 @@ // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce();   if let Some(block_overrides) = overrides.block { - apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); + apply_block_overrides(*block_overrides, db, evm_env.block_env.inner_mut()); } if let Some(state_overrides) = overrides.state { apply_state_overrides(state_overrides, db) @@ -801,7 +806,7 @@ let mut tx_env = self.create_txn_env(&evm_env, request, &mut *db)?;   // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): <https://github.com/ethereum/go-ethereum/blob/355228b011ef9a85ebc0f21e7196f892038d49f0/internal/ethapi/api.go#L700-L704> if tx_env.gas_price() == 0 { - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; }   if !request_has_gas_limit { @@ -811,7 +816,7 @@ // If gas price is specified, cap transaction gas limit with caller allowance trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); let cap = self.caller_gas_allowance(db, &evm_env, &tx_env)?; // ensure we cap gas_limit to the block's - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } }
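Throughout the call helpers, direct field reads on `block_env` are replaced by the revm `Block` trait getters, while mutations go through `inner_mut()` on the wrapped block environment. A small sketch of the read/write split on a plain revm `BlockEnv` (which implements the trait directly; the `inner_mut()` indirection is specific to the fork's wrapping type):

```rust
// Reads via the revm `Block` trait, as used by the call helpers above.
use revm::context::{Block, BlockEnv};

fn cap_to_block(block_env: &mut BlockEnv, caller_allowance: u64) -> u64 {
    // cap the caller allowance to the block gas limit, read through the trait getter
    let capped = caller_allowance.min(block_env.gas_limit());
    // the eth_call path also zeroes the basefee when the request has no gas price;
    // in the fork this write goes through `evm_env.block_env.inner_mut().basefee = 0`
    block_env.basefee = 0;
    capped
}
```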
diff --git reth/crates/rpc/rpc-eth-api/src/helpers/estimate.rs scroll-reth/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index cca674e9739195c94c3aab6bdec44b9c82c44fae..cd2518345ce162294e91536a755491cf9567ebb6 100644 --- reth/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ scroll-reth/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -18,7 +18,10 @@ EthApiError, RevertError, RpcInvalidTransactionError, }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use reth_storage_api::StateProvider; -use revm::context_interface::{result::ExecutionResult, Transaction}; +use revm::{ + context::Block, + context_interface::{result::ExecutionResult, Transaction}, +}; use tracing::trace;   /// Gas execution estimates @@ -60,10 +63,10 @@ // Keep a copy of gas related request values let tx_request_gas_limit = request.as_ref().gas_limit(); let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block - let max_gas_limit = evm_env - .cfg_env - .tx_gas_limit_cap - .map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit)); + let max_gas_limit = evm_env.cfg_env.tx_gas_limit_cap.map_or_else( + || evm_env.block_env.gas_limit(), + |cap| cap.min(evm_env.block_env.gas_limit()), + );   // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit.
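The estimate change keeps the same cap rule but switches to `map_or_else` because the default branch is now a trait method call. The rule in isolation:

```rust
// The cap rule from the hunk above: a configured per-transaction gas cap bounds the
// estimate's upper limit, otherwise the block gas limit does.
fn max_gas_limit(tx_gas_limit_cap: Option<u64>, block_gas_limit: u64) -> u64 {
    tx_gas_limit_cap.map_or(block_gas_limit, |cap| cap.min(block_gas_limit))
}

fn main() {
    assert_eq!(max_gas_limit(Some(16_777_216), 30_000_000), 16_777_216);
    assert_eq!(max_gas_limit(None, 30_000_000), 30_000_000);
}
```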
diff --git reth/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs scroll-reth/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 94dc214b6c8a2ae767f4bb89cd29573751b662d1..cbbb6d6c6791613e56f91695f9d6fb68e1b41b67 100644 --- reth/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ scroll-reth/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -13,7 +13,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome}, - ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, + ConfigureEvm, Evm, NextBlockEnvAttributes, }; use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader}; use reth_revm::{database::StateProviderDatabase, db::State}; @@ -23,8 +23,8 @@ block::BlockAndReceipts, builder::config::PendingBlockKind, EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_storage_api::{ - noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, - ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderBox, StateProviderFactory, + noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderHeader, ProviderTx, ReceiptProvider, + StateProviderBox, StateProviderFactory, }; use reth_transaction_pool::{ error::InvalidPoolTransactionError, BestTransactions, BestTransactionsAttributes, @@ -61,17 +61,7 @@ /// Configures the [`PendingBlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - #[expect(clippy::type_complexity)] - fn pending_block_env_and_cfg( - &self, - ) -> Result< - PendingBlockEnv< - ProviderBlock<Self::Provider>, - ProviderReceipt<Self::Provider>, - SpecFor<Self::Evm>, - >, - Self::Error, - > { + fn pending_block_env_and_cfg(&self) -> Result<PendingBlockEnv<Self::Evm>, Self::Error> { if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? && let Some(receipts) = self .provider() @@ -166,7 +156,7 @@ // Is the pending block cached? if let Some(pending_block) = lock.as_ref() { // Is the cached block not expired and latest is its parent? 
- if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && + if pending.evm_env.block_env.number() == U256::from(pending_block.block().number()) && parent.hash() == pending_block.block().parent_hash() && now <= pending_block.expires_at { @@ -265,14 +255,14 @@ .blob_params_at_timestamp(parent.timestamp()) .unwrap_or_else(BlobParams::cancun); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = block_env.gas_limit; + let block_gas_limit: u64 = block_env.gas_limit();   // Only include transactions if not configured as Empty if !self.pending_block_kind().is_empty() { let mut best_txs = self .pool() .best_transactions_with_attributes(BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|gasprice| gasprice as u64), )) // freeze to get a block as fast as possible @@ -430,3 +420,17 @@ withdrawals: parent.withdrawals_root().map(|_| Default::default()), } } } + +#[cfg(feature = "scroll")] +impl<H: alloy_consensus::BlockHeader> BuildPendingEnv<H> + for reth_scroll_evm::ScrollNextBlockEnvAttributes +{ + fn build_pending_env(parent: &reth_primitives_traits::SealedHeader<H>) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(1), + suggested_fee_recipient: parent.beneficiary(), + gas_limit: parent.gas_limit(), + base_fee: parent.base_fee_per_gas().unwrap_or_default(), + } + } +}
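With `BuildPendingEnv` now implemented for `ScrollNextBlockEnvAttributes`, the pending-block helpers can derive Scroll attributes directly from the latest sealed header. A hedged usage sketch (the `BuildPendingEnv` import path is an assumption):

```rust
// Sketch only: deriving Scroll pending-block attributes from the parent header.
use alloy_consensus::Header;
use reth_primitives_traits::SealedHeader;
use reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv; // assumed path
use reth_scroll_evm::ScrollNextBlockEnvAttributes;

fn scroll_pending_attrs(parent: &SealedHeader<Header>) -> ScrollNextBlockEnvAttributes {
    // timestamp = parent + 1s; beneficiary and gas limit carry over; base fee defaults to 0 if absent
    ScrollNextBlockEnvAttributes::build_pending_env(parent)
}
```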
diff --git reth/crates/rpc/rpc-eth-api/src/helpers/trace.rs scroll-reth/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a3c79416cfef222074f1edf5c4bc9c9a5f121c16..86039e3808255d74a9729046bc9b397b602e4573 100644 --- reth/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ scroll-reth/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -19,14 +19,14 @@ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, EthApiError, }; use reth_storage_api::{ProviderBlock, ProviderTx}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit}; +use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc;   /// Executes CPU heavy tasks. pub trait Trace: LoadState<Error: FromEvmError<Self::Evm>> { - /// Executes the [`TxEnvFor`] with [`EvmEnvFor`] against the given [Database] without committing - /// state changes. + /// Executes the [`TxEnvFor`] with [`reth_evm::EvmEnv`] against the given [Database] without + /// committing state changes. fn inspect<DB, I>( &self, db: DB, @@ -301,8 +301,8 @@ // on top of its parent block's state let state_at = block.parent_hash(); let block_hash = block.hash();   - let block_number = evm_env.block_env.number.saturating_to(); - let base_fee = evm_env.block_env.basefee; + let block_number = evm_env.block_env.number().saturating_to(); + let base_fee = evm_env.block_env.basefee();   // now get the state let state = this.state_at_block_id(state_at.into()).await?;
diff --git reth/crates/rpc/rpc-eth-types/src/error/mod.rs scroll-reth/crates/rpc/rpc-eth-types/src/error/mod.rs index 1f3ee7dd6dd98d6951d99a0de20a52fdc82ac95a..196461d18ce489c302a4577014a18b6d125cbe37 100644 --- reth/crates/rpc/rpc-eth-types/src/error/mod.rs +++ scroll-reth/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -681,7 +681,7 @@ /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -762,7 +762,7 @@ InvalidTransaction::MaxFeePerBlobGasNotSupported => Self::MaxFeePerBlobGasNotSupported, InvalidTransaction::BlobVersionedHashesNotSupported => { Self::BlobVersionedHashesNotSupported } - InvalidTransaction::BlobGasPriceGreaterThanMax => Self::BlobFeeCapTooLow, + InvalidTransaction::BlobGasPriceGreaterThanMax { .. } => Self::BlobFeeCapTooLow, InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes, InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { have, .. } => Self::TooManyBlobs { have }, @@ -780,6 +780,7 @@ InvalidTransaction::Eip7873NotSupported => Self::TxTypeNotSupported, InvalidTransaction::Eip7873MissingTarget => { Self::other(internal_rpc_err(err.to_string())) } + InvalidTransaction::Str(_) => Self::other(internal_rpc_err(err.to_string())), } } }
diff --git reth/crates/rpc/rpc-eth-types/src/fee_history.rs scroll-reth/crates/rpc/rpc-eth-types/src/fee_history.rs index 3eaf69d2c4c245c2fe8939ff3b07c495ca0b7843..55abfbf50620c53a1d05db1a7970f7bc8dcab09c 100644 --- reth/crates/rpc/rpc-eth-types/src/fee_history.rs +++ scroll-reth/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -51,6 +51,7 @@ }   /// How the cache is configured. #[inline] + #[allow(clippy::missing_const_for_fn)] pub fn config(&self) -> &FeeHistoryCacheConfig { &self.inner.config }
diff --git reth/crates/rpc/rpc-eth-types/src/gas_oracle.rs scroll-reth/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 7bbf6433c6d426c90634c413a08f046472ebf3cd..2e5030b755aa4f48d3944fe267f48ee7b0c146ea 100644 --- reth/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ scroll-reth/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -3,7 +3,7 @@ //! previous blocks.   use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction, TxReceipt}; -use alloy_eips::BlockNumberOrTag; +use alloy_eips::{BlockNumberOrTag, Encodable2718}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; @@ -117,6 +117,7 @@ block_hash: B256::ZERO, price: oracle_config .default_suggested_fee .unwrap_or_else(|| GasPriceOracleResult::default().price), + is_at_capacity: false, }, lowest_effective_tip_cache: EffectiveTipLruCache(LruMap::new(ByLength::new( cached_values, @@ -210,7 +211,8 @@ { price = max_price; }   - inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price }; + inner.last_price = + GasPriceOracleResult { block_hash: header.hash(), price, ..Default::default() };   Ok(price) } @@ -344,11 +346,149 @@ { suggestion = max_price; }   - inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price: suggestion }; + inner.last_price = GasPriceOracleResult { + block_hash: header.hash(), + price: suggestion, + ..Default::default() + };   Ok(suggestion) }   + /// Suggests a max priority fee value using a simplified and more predictable algorithm + /// appropriate for chains like Scroll with a single known block builder. + /// + /// It returns either: + /// - The minimum suggested priority fee when blocks have capacity + /// - 10% above the median effective priority fee from the last block when at capacity + /// + /// A block is considered at capacity if its total gas used plus the maximum single transaction + /// gas would exceed the block's gas limit, or the total block payload size plus the maximum + /// single transaction would exceed the block's payload size limit. + pub async fn scroll_suggest_tip_cap( + &self, + min_suggested_priority_fee: U256, + payload_size_limit: u64, + ) -> EthResult<U256> { + let (result, _) = self + .calculate_suggest_tip_cap( + BlockNumberOrTag::Latest, + min_suggested_priority_fee, + payload_size_limit, + ) + .await; + result + } + + /// Calculates a gas price suggestion and returns whether the block is at capacity. + /// + /// This method implements the core logic for suggesting gas prices based on block capacity. + /// It returns a tuple containing the suggested gas price and a boolean indicating + /// whether the latest block is at capacity (gas limit or payload size limit). 
+ pub async fn calculate_suggest_tip_cap( + &self, + block_number_or_tag: BlockNumberOrTag, + min_suggested_priority_fee: U256, + payload_size_limit: u64, + ) -> (EthResult<U256>, bool) { + let header = match self.provider.sealed_header_by_number_or_tag(block_number_or_tag) { + Ok(Some(header)) => header, + Ok(None) => return (Err(EthApiError::HeaderNotFound(BlockId::latest())), false), + Err(e) => return (Err(e.into()), false), + }; + + let mut inner = self.inner.lock().await; + + // if we have stored a last price, then we check whether or not it was for the same head + if inner.last_price.block_hash == header.hash() { + return (Ok(inner.last_price.price), inner.last_price.is_at_capacity); + } + + let mut suggestion = min_suggested_priority_fee; + let mut is_at_capacity = false; + + // find the maximum gas used by any of the transactions in the block and + // the maximum and total payload size used by the transactions in the block to use as + // the capacity margin for the block, if no receipts or block are found return the + // suggested_min_priority_fee + let (block, receipts) = match self.cache.get_block_and_receipts(header.hash()).await { + Ok(Some((block, receipts))) => (block, receipts), + Ok(None) => return (Ok(suggestion), false), + Err(e) => return (Err(e.into()), false), + }; + + let mut max_tx_gas_used = 0u64; + let mut last_cumulative_gas = 0; + for receipt in receipts.as_ref() { + let cumulative_gas = receipt.cumulative_gas_used(); + // get the gas used by each transaction in the block, by subtracting the + // cumulative gas used of the previous transaction from the cumulative gas used of + // the current transaction. This is because there is no gas_used() + // method on the Receipt trait. + let gas_used = cumulative_gas - last_cumulative_gas; + max_tx_gas_used = max_tx_gas_used.max(gas_used); + last_cumulative_gas = cumulative_gas; + } + + let transactions = block.transactions_recovered(); + + // Calculate payload sizes for all transactions + let mut max_tx_payload_size = 0u64; + let mut total_payload_size = 0u64; + + for tx in transactions { + // Get the EIP-2718 encoded length as payload size + let payload_size = tx.encode_2718_len() as u64; + max_tx_payload_size = max_tx_payload_size.max(payload_size); + total_payload_size += payload_size; + } + + // sanity check the max gas used and transaction size value + if max_tx_gas_used > header.gas_limit() { + warn!(target: "scroll::gas_price_oracle", ?max_tx_gas_used, "Found tx consuming more gas than the block limit"); + return (Ok(suggestion), is_at_capacity); + } + if max_tx_payload_size > payload_size_limit { + warn!(target: "scroll::gas_price_oracle", ?max_tx_payload_size, "Found tx consuming more size than the block size limit"); + return (Ok(suggestion), is_at_capacity); + } + + // if the block is at capacity, the suggestion must be increased + if header.gas_used() + max_tx_gas_used > header.gas_limit() || + total_payload_size + max_tx_payload_size > payload_size_limit + { + let median_tip = match self.get_block_median_tip(header.hash()).await { + Ok(Some(median_tip)) => median_tip, + Ok(None) => return (Ok(suggestion), is_at_capacity), + Err(e) => return (Err(e), is_at_capacity), + }; + + let new_suggestion = median_tip + median_tip / U256::from(10); + + if new_suggestion > suggestion { + suggestion = new_suggestion; + } + is_at_capacity = true; + } + + // constrain to the max price + if let Some(max_price) = self.oracle_config.max_price && + suggestion > max_price + { + suggestion = max_price; + } + + // update the cache 
only if it's latest block header + if block_number_or_tag == BlockNumberOrTag::Latest { + inner.last_price = GasPriceOracleResult { + block_hash: header.hash(), + price: suggestion, + is_at_capacity, + }; + } + (Ok(suggestion), is_at_capacity) + } + /// Get the median tip value for the given block. This is useful for determining /// tips when a block is at capacity. /// @@ -413,11 +553,13 @@ /// The block hash that the oracle used to calculate the price pub block_hash: B256, /// The price that the oracle calculated pub price: U256, + /// Whether the latest block is at capacity + pub is_at_capacity: bool, }   impl Default for GasPriceOracleResult { fn default() -> Self { - Self { block_hash: B256::ZERO, price: U256::from(GWEI_TO_WEI) } + Self { block_hash: B256::ZERO, price: U256::from(GWEI_TO_WEI), is_at_capacity: false } } }
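The Scroll gas price oracle only raises its suggestion when the latest block is at capacity, and then by 10% over the block's median effective tip. A plain-integer illustration of that rule (the real implementation works on sealed headers, receipts and `U256`, and additionally clamps the result to the oracle's configured max price):

```rust
// Capacity rule and 10% bump described above, in isolation.
fn scroll_tip_suggestion(
    min_tip: u128,
    median_tip: u128,
    gas_used: u64,
    max_tx_gas: u64,
    gas_limit: u64,
    payload_size: u64,
    max_tx_payload: u64,
    payload_limit: u64,
) -> u128 {
    // at capacity: adding the largest observed tx (by gas or encoded size) would
    // overflow the block's gas limit or payload size limit
    let at_capacity = gas_used + max_tx_gas > gas_limit ||
        payload_size + max_tx_payload > payload_limit;
    if at_capacity {
        // 10% above the median effective priority fee, never below the configured minimum
        min_tip.max(median_tip + median_tip / 10)
    } else {
        min_tip
    }
}

fn main() {
    // a saturated block: 29.9M gas used, largest tx burned 500k gas, 30M gas limit
    let tip = scroll_tip_suggestion(100, 1_000, 29_900_000, 500_000, 30_000_000, 0, 0, u64::MAX);
    assert_eq!(tip, 1_100);
}
```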
diff --git reth/crates/rpc/rpc-eth-types/src/pending_block.rs scroll-reth/crates/rpc/rpc-eth-types/src/pending_block.rs index 05ad6fb4e27f2e656265c76d27d7ebd8b7d4c73e..d0b5c65c1edb70fd7641f9eecbe9d51fc5d5a5a8 100644 --- reth/crates/rpc/rpc-eth-types/src/pending_block.rs +++ scroll-reth/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -13,18 +13,18 @@ use reth_chain_state::{ BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, }; use reth_ethereum_primitives::Receipt; -use reth_evm::EvmEnv; +use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, };   -/// Configured [`EvmEnv`] for a pending block. +/// Configured [`reth_evm::EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv<B: Block, R, Spec> { - /// Configured [`EvmEnv`] for the pending block. - pub evm_env: EvmEnv<Spec>, +pub struct PendingBlockEnv<Evm: ConfigureEvm> { + /// Configured [`reth_evm::EvmEnv`] for the pending block. + pub evm_env: EvmEnvFor<Evm>, /// Origin block for the config - pub origin: PendingBlockEnvOrigin<B, R>, + pub origin: PendingBlockEnvOrigin<BlockTy<Evm::Primitives>, ReceiptTy<Evm::Primitives>>, }   /// The origin for a configured [`PendingBlockEnv`]
diff --git reth/crates/rpc/rpc-eth-types/src/simulate.rs scroll-reth/crates/rpc/rpc-eth-types/src/simulate.rs index 5492e127b779afebb2c541b834513912d99af256..ec63443da3dad69d8476156526916d68c4aca79c 100644 --- reth/crates/rpc/rpc-eth-types/src/simulate.rs +++ scroll-reth/crates/rpc/rpc-eth-types/src/simulate.rs @@ -24,6 +24,7 @@ use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ + context::Block, context_interface::result::ExecutionResult, primitives::{Address, Bytes, TxKind}, Database, @@ -88,7 +89,7 @@ // correctness. let tx = resolve_transaction( call, default_gas_limit, - builder.evm().block().basefee, + builder.evm().block().basefee(), chain_id, builder.evm_mut().db_mut(), tx_resp_builder,
diff --git reth/crates/rpc/rpc/Cargo.toml scroll-reth/crates/rpc/rpc/Cargo.toml index c47c383f0576610276e7d9999e023814651bab48..8fc801b2a54267f4469fa8cb104532f30ffa0edc 100644 --- reth/crates/rpc/rpc/Cargo.toml +++ scroll-reth/crates/rpc/rpc/Cargo.toml @@ -66,6 +66,9 @@ alloy-serde.workspace = true revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } revm-primitives = { workspace = true, features = ["serde"] }   +# scroll +reth-scroll-evm = { workspace = true, optional = true } + # rpc jsonrpsee.workspace = true http.workspace = true @@ -107,3 +110,4 @@ jsonrpsee = { workspace = true, features = ["client"] }   [features] js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] +scroll = ["reth-scroll-evm"]
diff --git reth/crates/rpc/rpc/src/aliases.rs scroll-reth/crates/rpc/rpc/src/aliases.rs index 4e317305ca4ffa3aa5d87fe707913fd08e24e0d5..8854f1b607d002a3d0dec5a194515e4462a1a00a 100644 --- reth/crates/rpc/rpc/src/aliases.rs +++ scroll-reth/crates/rpc/rpc/src/aliases.rs @@ -1,4 +1,4 @@ -use reth_evm::{ConfigureEvm, SpecFor, TxEnvFor}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::EthApiError;   @@ -8,7 +8,6 @@ dyn RpcConvert< Primitives = <Evm as ConfigureEvm>::Primitives, Network = Network, Error = Error, - TxEnv = TxEnvFor<Evm>, - Spec = SpecFor<Evm>, + Evm = Evm, >, >;
diff --git reth/crates/rpc/rpc/src/debug.rs scroll-reth/crates/rpc/rpc/src/debug.rs index b3715c0e8e0119fda954f9a7dca7532da3526fc9..ccf753db3f415d22b9aad41359996cf1ba8fb35c 100644 --- reth/crates/rpc/rpc/src/debug.rs +++ scroll-reth/crates/rpc/rpc/src/debug.rs @@ -3,6 +3,7 @@ transaction::{SignerRecoverable, TxHashRef}, BlockHeader, }; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; @@ -40,7 +41,7 @@ StateProofProvider, StateProviderFactory, StateRootProvider, TransactionVariant, }; use reth_tasks::pool::BlockingTaskGuard; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; -use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit}; +use revm::{context::Block, context_interface::Transaction, state::EvmState, DatabaseCommit}; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; @@ -372,8 +373,8 @@ // <https://github.com/rust-lang/rust/issues/100013> let db = db.0;   let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), hash: None, block_hash: None, index: None, @@ -589,8 +590,8 @@ } results.push(trace); } // Increment block_env number and timestamp for the next bundle - evm_env.block_env.number += uint!(1_U256); - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().number += uint!(1_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256);   all_bundles.push(results); } @@ -649,11 +650,18 @@ let block_executor = this.eth_api().evm_config().executor(db);   let mut witness_record = ExecutionWitnessRecord::default();   + let mut withdraw_root_res: Result<_, reth_errors::ProviderError> = Ok(()); let _ = block_executor - .execute_with_state_closure(&block, |statedb: &State<_>| { + .execute_with_state_closure(&block, |statedb: &mut State<_>| { + #[cfg(feature = "scroll")] + { + use reth_scroll_evm::LoadWithdrawRoot; + withdraw_root_res = statedb.load_withdraw_root(); + } witness_record.record_executed_state(statedb); }) .map_err(|err| EthApiError::Internal(err.into()))?; + withdraw_root_res?;   let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } = witness_record; @@ -741,8 +749,8 @@ .as_ref() .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), };   if let Some(tracer) = tracer {
diff --git reth/crates/rpc/rpc/src/eth/bundle.rs scroll-reth/crates/rpc/rpc/src/eth/bundle.rs index 48e3219daa35626354f9878811298c43e3f855c6..b541f232473088b64605c8b69e9a4a7e879d7522 100644 --- reth/crates/rpc/rpc/src/eth/bundle.rs +++ scroll-reth/crates/rpc/rpc/src/eth/bundle.rs @@ -2,12 +2,12 @@ //! `Eth` bundle implementation and helpers.   use alloy_consensus::{transaction::TxHashRef, EnvKzgSettings, Transaction as _}; use alloy_eips::eip7840::BlobParams; +use alloy_evm::env::BlockEnvironment; use alloy_primitives::{uint, Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; - use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, @@ -18,7 +18,9 @@ use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{ EthBlobTransactionSidecar, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::sync::Arc;   /// `Eth` bundle implementation. @@ -34,6 +36,7 @@ Self { inner: Arc::new(EthBundleInner { eth_api, blocking_task_guard }) } }   /// Access the underlying `Eth` API. + #[allow(clippy::missing_const_for_fn)] pub fn eth_api(&self) -> &Eth { &self.inner.eth_api } @@ -88,18 +91,18 @@ // Note: the block number is considered the `parent` block: <https://github.com/flashbots/mev-geth/blob/fddf97beec5877483f879a77b7dea2e58a58d653/internal/ethapi/api.go#L2104> let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?;   if let Some(coinbase) = coinbase { - evm_env.block_env.beneficiary = coinbase; + evm_env.block_env.inner_mut().beneficiary = coinbase; }   // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - evm_env.block_env.timestamp = U256::from(timestamp); + evm_env.block_env.inner_mut().timestamp = U256::from(timestamp); } else { - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); }   if let Some(difficulty) = difficulty { - evm_env.block_env.difficulty = U256::from(difficulty); + evm_env.block_env.inner_mut().difficulty = U256::from(difficulty); }   // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -110,7 +113,7 @@ let blob_params = self .eth_api() .provider() .chain_spec() - .blob_params_at_timestamp(evm_env.block_env.timestamp.saturating_to()) + .blob_params_at_timestamp(evm_env.block_env.timestamp().saturating_to()) .unwrap_or_else(BlobParams::cancun); if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::<u64>() > blob_params.max_blob_gas_per_block() @@ -124,30 +127,30 @@ } }   // default to call gas limit unless user requests a smaller limit - evm_env.block_env.gas_limit = self.inner.eth_api.call_gas_limit(); + evm_env.block_env.inner_mut().gas_limit = self.inner.eth_api.call_gas_limit(); if let Some(gas_limit) = gas_limit { - if gas_limit > evm_env.block_env.gas_limit { + if gas_limit > evm_env.block_env.gas_limit() { return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() ) } - evm_env.block_env.gas_limit = gas_limit; + evm_env.block_env.inner_mut().gas_limit = gas_limit; }   if let Some(base_fee) = base_fee { - 
evm_env.block_env.basefee = base_fee.try_into().unwrap_or(u64::MAX); + evm_env.block_env.inner_mut().basefee = base_fee.try_into().unwrap_or(u64::MAX); }   - let state_block_number = evm_env.block_env.number; + let state_block_number = evm_env.block_env.number(); // use the block number of the request - evm_env.block_env.number = U256::from(block_number); + evm_env.block_env.inner_mut().number = U256::from(block_number);   let eth_api = self.eth_api().clone();   self.eth_api() .spawn_with_state_at_block(at, move |state| { - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let db = CacheDB::new(StateProviderDatabase::new(state));   let initial_coinbase = db
diff --git reth/crates/rpc/rpc/src/eth/filter.rs scroll-reth/crates/rpc/rpc/src/eth/filter.rs index 01b6a94158fd8cd2ad7b7086e7932f173bb3cbd2..4c129546af271568dca8b6c0ac538f8c7811c6c7 100644 --- reth/crates/rpc/rpc/src/eth/filter.rs +++ scroll-reth/crates/rpc/rpc/src/eth/filter.rs @@ -157,6 +157,7 @@ eth_filter }   /// Returns all currently active filters + #[allow(clippy::missing_const_for_fn)] pub fn active_filters(&self) -> &ActiveFilters<RpcTransaction<Eth::NetworkTypes>> { &self.inner.active_filters }
diff --git reth/crates/rpc/rpc/src/eth/helpers/call.rs scroll-reth/crates/rpc/rpc/src/eth/helpers/call.rs index a76e146042d69c88df6117ce209f45bf582e78c4..abe06cb55ece982a4901470fbec6efd659c69c32 100644 --- reth/crates/rpc/rpc/src/eth/helpers/call.rs +++ scroll-reth/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,7 +1,6 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm.   use crate::EthApi; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, @@ -13,12 +12,7 @@ impl<N, Rpc> EthCall for EthApi<N, Rpc> where N: RpcNodeCore, EthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, Evm = N::Evm>, { }   @@ -26,12 +20,7 @@ impl<N, Rpc> Call for EthApi<N, Rpc> where N: RpcNodeCore, EthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, Evm = N::Evm>, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -48,11 +37,6 @@ impl<N, Rpc> EstimateCall for EthApi<N, Rpc> where N: RpcNodeCore, EthApiError: FromEvmError<N::Evm>, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor<N::Evm>, - Spec = SpecFor<N::Evm>, - >, + Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, Evm = N::Evm>, { }
diff --git reth/crates/rpc/rpc/src/eth/sim_bundle.rs scroll-reth/crates/rpc/rpc/src/eth/sim_bundle.rs index f704382175403844f34c9de6851d0dc5752e762d..fb7f69b61f53384b957efeec0b51d0e03f893d54 100644 --- reth/crates/rpc/rpc/src/eth/sim_bundle.rs +++ scroll-reth/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,7 +2,7 @@ //! `Eth` Sim bundle implementation and helpers.   use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockNumberOrTag; -use alloy_evm::overrides::apply_block_overrides; +use alloy_evm::{env::BlockEnvironment, overrides::apply_block_overrides}; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ @@ -22,7 +22,9 @@ use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::ProviderTx; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::{sync::Arc, time::Duration}; use tracing::trace;   @@ -73,6 +75,7 @@ Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) } }   /// Access the underlying `Eth` API. + #[allow(clippy::missing_const_for_fn)] pub fn eth_api(&self) -> &Eth { &self.inner.eth_api } @@ -242,12 +245,12 @@ .eth_api .spawn_with_state_at_block(current_block_id, move |state| { // Setup environment let current_block_number = current_block.number(); - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let mut db = CacheDB::new(StateProviderDatabase::new(state));   // apply overrides - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut());   let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) .map_err(EthApiError::from_eth_err)?
diff --git reth/crates/rpc/rpc/src/reth.rs scroll-reth/crates/rpc/rpc/src/reth.rs index 8f8decd7f4aac9b617dc848ab2ed4567b036787f..3aaa1ebc5e0b61269a0e29ba0364aa872704ff29 100644 --- reth/crates/rpc/rpc/src/reth.rs +++ scroll-reth/crates/rpc/rpc/src/reth.rs @@ -27,6 +27,7 @@ // === impl RethApi ===   impl<Provider> RethApi<Provider> { /// The provider that can interact with the chain. + #[allow(clippy::missing_const_for_fn)] pub fn provider(&self) -> &Provider { &self.inner.provider }
diff --git reth/crates/rpc/rpc/src/trace.rs scroll-reth/crates/rpc/rpc/src/trace.rs index 4ed42bc721d1eb16544f33544f0920510959fa4f..767082cc70064b20b6347529d0873dafc5b927f8 100644 --- reth/crates/rpc/rpc/src/trace.rs +++ scroll-reth/crates/rpc/rpc/src/trace.rs @@ -69,6 +69,7 @@ self.inner.blocking_task_guard.clone().acquire_owned().await }   /// Access the underlying `Eth` API. + #[allow(clippy::missing_const_for_fn)] pub fn eth_api(&self) -> &Eth { &self.inner.eth_api }
diff --git reth/crates/rpc/rpc/src/validation.rs scroll-reth/crates/rpc/rpc/src/validation.rs index d03846a4279ba48c5bcfb3c0c939537c17c69f65..663f4df276dac07efafa234704bfba8b46c9f5f1 100644 --- reth/crates/rpc/rpc/src/validation.rs +++ scroll-reth/crates/rpc/rpc/src/validation.rs @@ -16,7 +16,7 @@ use core::fmt; use jsonrpsee::core::RpcResult; use jsonrpsee_types::error::ErrorObject; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_consensus::{Consensus, FullConsensus}; +use reth_consensus::{Consensus, FullConsensus, HeaderValidator}; use reth_consensus_common::validation::MAX_RLP_BLOCK_SIZE; use reth_engine_primitives::PayloadValidator; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; @@ -208,7 +208,7 @@ let state_root = state_provider.state_root(state_provider.hashed_post_state(&output.state))?;   - if state_root != block.header().state_root() { + if self.consensus.validate_state_root(block.header(), state_root).is_err() { return Err(ConsensusError::BodyStateRootDiff( GotExpected { got: state_root, expected: block.header().state_root() }.into(), )
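The validation change delegates the state-root comparison to the consensus implementation (via the fork's `HeaderValidator::validate_state_root`), so chains with a different state commitment can override the check. A sketch of what the default equality behaviour looks like, with the signature approximated from the call site:

```rust
// Sketch only: the default behaviour behind the delegated call above. The real method
// is a trait method on the fork's `HeaderValidator`; its exact signature is assumed.
use alloy_primitives::B256;
use reth_consensus::ConsensusError;
use reth_primitives_traits::GotExpected;

fn validate_state_root(expected: B256, got: B256) -> Result<(), ConsensusError> {
    if got == expected {
        Ok(())
    } else {
        // mirrors the error the RPC validation path builds on mismatch
        Err(ConsensusError::BodyStateRootDiff(GotExpected { got, expected }.into()))
    }
}
```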
diff --git reth/crates/stateless/src/validation.rs scroll-reth/crates/stateless/src/validation.rs index 23308bcfa55d985e30abed1ba71e57ca3ccc6250..38b96d6bd0f31ce9910a1056833771a4cc020cd5 100644 --- reth/crates/stateless/src/validation.rs +++ scroll-reth/crates/stateless/src/validation.rs @@ -21,6 +21,9 @@ use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher};   +/// BLOCKHASH ancestor lookup window limit per EVM (number of most recent blocks accessible). +const BLOCKHASH_ANCESTOR_LIMIT: usize = 256; + /// Errors that can occur during stateless validation. #[derive(Debug, thiserror::Error)] pub enum StatelessValidationError { @@ -174,6 +177,15 @@ .collect::<Result<_, _>>()?; // Sort the headers by their block number to ensure that they are in // ascending order. ancestor_headers.sort_by_key(|header| header.number()); + + // Enforce BLOCKHASH ancestor headers limit (256 most recent blocks) + let count = ancestor_headers.len(); + if count > BLOCKHASH_ANCESTOR_LIMIT { + return Err(StatelessValidationError::AncestorHeaderLimitExceeded { + count, + limit: BLOCKHASH_ANCESTOR_LIMIT, + }); + }   // Check that the ancestor headers form a contiguous chain and are not just random headers. let ancestor_hashes = compute_ancestor_hashes(&current_block, &ancestor_headers)?;
diff --git reth/crates/tracing-otlp/Cargo.toml scroll-reth/crates/tracing-otlp/Cargo.toml index 7b8b666116c5b9fe9a5a6cb446b30c05aa17542a..60cee0aa229e2d8255c7f5829048ca722eb512ab 100644 --- reth/crates/tracing-otlp/Cargo.toml +++ scroll-reth/crates/tracing-otlp/Cargo.toml @@ -9,13 +9,29 @@ repository.workspace = true exclude.workspace = true   [dependencies] -opentelemetry_sdk = "0.29.0" -opentelemetry = "0.29.1" -opentelemetry-otlp = "0.29.0" -tracing-opentelemetry = "0.30.0" +# obs +opentelemetry_sdk = { workspace = true, optional = true } +opentelemetry = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true } +opentelemetry-semantic-conventions = { workspace = true, optional = true } +tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true -opentelemetry-semantic-conventions = "0.29.0" + +# misc +eyre.workspace = true +url.workspace = true   [lints] workspace = true + +[features] +default = ["otlp"] + +otlp = [ + "opentelemetry", + "opentelemetry_sdk", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "tracing-opentelemetry", +]
diff --git reth/crates/tracing-otlp/src/lib.rs scroll-reth/crates/tracing-otlp/src/lib.rs index 1de112cdb33d1e79787d2085931c9ed6c5fef434..07415ac2a653653c70f24da084481f838406e846 100644 --- reth/crates/tracing-otlp/src/lib.rs +++ scroll-reth/crates/tracing-otlp/src/lib.rs @@ -1,12 +1,16 @@ +#![cfg(feature = "otlp")] + //! Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint. //! //! This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system.   -use opentelemetry::{trace::TracerProvider, KeyValue, Value}; -use opentelemetry_otlp::SpanExporter; +use eyre::{ensure, WrapErr}; +use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; +use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ + propagation::TraceContextPropagator, trace::{SdkTracer, SdkTracerProvider}, Resource, }; @@ -14,25 +18,73 @@ use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL}; use tracing::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; +use url::Url;   /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing -/// with OTLP export. -pub fn layer<S>(service_name: impl Into<Value>) -> OpenTelemetryLayer<S, SdkTracer> +/// with OTLP export to an url. +pub fn span_layer<S>( + service_name: impl Into<Value>, + endpoint: &Url, +) -> eyre::Result<OpenTelemetryLayer<S, SdkTracer>> where for<'span> S: Subscriber + LookupSpan<'span>, { - let exporter = SpanExporter::builder().with_http().build().unwrap(); + global::set_text_map_propagator(TraceContextPropagator::new()); + + let resource = build_resource(service_name); + + let span_exporter = + SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?;   - let resource = Resource::builder() + let tracer_provider = SdkTracerProvider::builder() + .with_resource(resource) + .with_batch_exporter(span_exporter) + .build(); + + global::set_tracer_provider(tracer_provider.clone()); + + let tracer = tracer_provider.tracer("reth-otlp"); + Ok(tracing_opentelemetry::layer().with_tracer(tracer)) +} + +// Builds OTLP resource with service information. +fn build_resource(service_name: impl Into<Value>) -> Resource { + Resource::builder() .with_service_name(service_name) .with_schema_url([KeyValue::new(SERVICE_VERSION, env!("CARGO_PKG_VERSION"))], SCHEMA_URL) - .build(); + .build() +} + +/// Destination for exported trace spans. +#[derive(Debug, Clone)] +pub enum TraceOutput { + /// Export traces as JSON to stdout. + Stdout, + /// Export traces to an OTLP collector at the specified URL. + Otlp(Url), +} + +impl TraceOutput { + /// Parses the trace output destination from a string. + /// + /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. + /// OTLP URLs must end with `/v1/traces` per the OTLP specification. 
+ pub fn parse(s: &str) -> eyre::Result<Self> { + if s == "stdout" { + return Ok(Self::Stdout); + }   - let provider = - SdkTracerProvider::builder().with_resource(resource).with_batch_exporter(exporter).build(); + let url = Url::parse(s).wrap_err("Invalid URL for trace output")?;   - let tracer = provider.tracer("reth-otlp"); - tracing_opentelemetry::layer().with_tracer(tracer) + // OTLP specification requires the `/v1/traces` path for trace endpoints + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(Self::Otlp(url)) + } }
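`TraceOutput::parse` accepts either the literal `stdout` or an OTLP collector URL whose path ends in `/v1/traces`. A hedged usage sketch (the crate path `reth_tracing_otlp` is assumed from the Cargo manifest above):

```rust
// Sketch only: parsing trace output destinations with the new TraceOutput helper.
use reth_tracing_otlp::TraceOutput; // assumed crate path

fn main() -> eyre::Result<()> {
    assert!(matches!(TraceOutput::parse("stdout")?, TraceOutput::Stdout));

    // a valid OTLP endpoint must end with /v1/traces per the OTLP spec
    let out = TraceOutput::parse("http://127.0.0.1:4318/v1/traces")?;
    assert!(matches!(out, TraceOutput::Otlp(_)));

    // missing /v1/traces suffix is rejected
    assert!(TraceOutput::parse("http://127.0.0.1:4318").is_err());
    Ok(())
}
```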
diff --git reth/crates/tracing/Cargo.toml scroll-reth/crates/tracing/Cargo.toml index a5c09c23a3590671dfa9fff33a22f49c14d2f752..8cf83e138cab5ae82ad028bd5424d44167d1ed56 100644 --- reth/crates/tracing/Cargo.toml +++ scroll-reth/crates/tracing/Cargo.toml @@ -12,11 +12,22 @@ [lints] workspace = true   [dependencies] +# reth +reth-tracing-otlp = { workspace = true, optional = true } + +# obs tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald.workspace = true tracing-logfmt.workspace = true + +# misc +clap = { workspace = true, features = ["derive"] } +eyre.workspace = true rolling-file.workspace = true -eyre.workspace = true -clap = { workspace = true, features = ["derive"] } +url = { workspace = true, optional = true } + +[features] +default = ["otlp"] +otlp = ["reth-tracing-otlp", "dep:url"]
diff --git reth/crates/tracing/src/layers.rs scroll-reth/crates/tracing/src/layers.rs index 5b9c93b5fb6b84f5d406b3a5df569ccbf7eeb38e..385c4fac51d96a84e3975447acc8ce105cb387a3 100644 --- reth/crates/tracing/src/layers.rs +++ scroll-reth/crates/tracing/src/layers.rs @@ -1,13 +1,13 @@ +use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use reth_tracing_otlp::span_layer; +use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, path::{Path, PathBuf}, }; - -use rolling_file::{RollingConditionBasic, RollingFileAppender}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; - -use crate::formatter::LogFormat;   /// A worker guard returned by the file layer. /// @@ -122,6 +122,25 @@ let file_filter = build_env_filter(None, filter)?; let layer = format.apply(file_filter, None, Some(writer)); self.add_layer(layer); Ok(guard) + } + + /// Add OTLP spans layer to the layer collection + #[cfg(feature = "otlp")] + pub fn with_span_layer( + &mut self, + service_name: String, + endpoint_exporter: url::Url, + level: tracing::Level, + ) -> eyre::Result<()> { + // Create the span provider + + let span_layer = span_layer(service_name, &endpoint_exporter) + .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? + .with_filter(tracing::level_filters::LevelFilter::from_level(level)); + + self.add_layer(span_layer); + + Ok(()) } }
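A hedged wiring sketch for the new `with_span_layer` hook; it assumes the surrounding `Layers` collection type is visible to the caller and keeps only the arguments shown in the diff:

```rust
// Sketch only: adding the OTLP span layer to reth's tracing layer collection.
// `reth_tracing::Layers` as a public type is an assumption here.
#[cfg(feature = "otlp")]
fn install_otlp_spans(layers: &mut reth_tracing::Layers) -> eyre::Result<()> {
    // OTLP trace endpoints end with /v1/traces
    let endpoint = url::Url::parse("http://127.0.0.1:4318/v1/traces")?;
    layers.with_span_layer("scroll-reth".to_string(), endpoint, tracing::Level::INFO)
}
```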
diff --git reth/crates/transaction-pool/Cargo.toml scroll-reth/crates/transaction-pool/Cargo.toml index 0203071984017aa09a76fe7b19320b615516b230..17b177d286984c3d7f33377cb7d64d99d3c7df1a 100644 --- reth/crates/transaction-pool/Cargo.toml +++ scroll-reth/crates/transaction-pool/Cargo.toml @@ -61,6 +61,7 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true }   [dev-dependencies] +alloy-consensus = { workspace = true, features = ["k256"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-primitives = { workspace = true, features = ["rand"] }
diff --git reth/crates/transaction-pool/src/blobstore/disk.rs scroll-reth/crates/transaction-pool/src/blobstore/disk.rs index 5ccafe15000bc3238c6ed5933dd8dce808bdf898..b883345aac620cb17790c532fec5cd38ffc6bcea 100644 --- reth/crates/transaction-pool/src/blobstore/disk.rs +++ scroll-reth/crates/transaction-pool/src/blobstore/disk.rs @@ -4,6 +4,8 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; use alloy_eips::{ eip4844::{BlobAndProofV1, BlobAndProofV2}, eip7594::BlobTransactionSidecarVariant, + eip7840::BlobParams, + merge::EPOCH_SLOTS, }; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; @@ -13,6 +15,13 @@ use tracing::{debug, trace};   /// How many [`BlobTransactionSidecarVariant`] to cache in memory. pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100; + +/// A cache size heuristic based on the highest blob params +/// +/// This uses the max blobs per tx and max blobs per block over 16 epochs: `21 * 6 * 512 = 64512` +/// This should be ~4MB +const VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE: u64 = + BlobParams::bpo2().max_blobs_per_tx * BlobParams::bpo2().max_blob_count * EPOCH_SLOTS * 16;   /// A blob store that stores blob data on disk. /// @@ -288,7 +297,9 @@ blob_cache: Mutex::new(LruMap::new(ByLength::new(max_length))), size_tracker: Default::default(), file_lock: Default::default(), txs_to_delete: Default::default(), - versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(max_length * 6))), + versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new( + VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE as u32, + ))), } }
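A worked check of the cache-size heuristic above, using the constants quoted in its comment (blobs per tx and per block for BPO2, 32 slots per epoch, 16 epochs); each cache entry is a pair of 32-byte hashes:

```rust
// Arithmetic behind VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE as described in the comment.
const ENTRIES: u64 = 6 * 21 * 32 * 16; // = 64_512, i.e. the quoted `21 * 6 * 512`
const APPROX_BYTES: u64 = ENTRIES * 64; // two 32-byte hashes per entry ≈ 4.1 MB

fn main() {
    assert_eq!(ENTRIES, 64_512);
    assert_eq!(APPROX_BYTES, 4_128_768);
}
```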
diff --git reth/crates/transaction-pool/src/error.rs scroll-reth/crates/transaction-pool/src/error.rs index 74d92fb3e6b9494c4c5abbe02bb463853ac10b2a..6360817caa19f30df4b9264ea9f06e1c76b09fa1 100644 --- reth/crates/transaction-pool/src/error.rs +++ scroll-reth/crates/transaction-pool/src/error.rs @@ -157,7 +157,7 @@ MissingEip4844BlobSidecar, /// Thrown if an EIP-4844 transaction without any blobs arrives #[error("blobless blob transaction")] NoEip4844Blobs, - /// Thrown if an EIP-4844 transaction without any blobs arrives + /// Thrown if an EIP-4844 transaction arrives with too many blobs #[error("too many blobs in transaction: have {have}, permitted {permitted}")] TooManyEip4844Blobs { /// Number of blobs the transaction has
diff --git reth/crates/transaction-pool/src/lib.rs scroll-reth/crates/transaction-pool/src/lib.rs index 7f3fa4a1177a2425a201d0a1814b1da4e4b1d708..54c06b18fcca3ee7a2b683087c06b98d6d0ab171 100644 --- reth/crates/transaction-pool/src/lib.rs +++ scroll-reth/crates/transaction-pool/src/lib.rs @@ -250,9 +250,10 @@ //! TransactionValidationTaskExecutor::eth(client.clone(), blob_store.clone(), executor.clone()), //! blob_store, //! Default::default(), //! ); +//! let chainspec = client.chain_spec(); //! //! // spawn a task that listens for new blocks and updates the pool's transactions, mined transactions etc.. -//! tokio::task::spawn(maintain_transaction_pool_future(client, pool, stream, executor.clone(), Default::default())); +//! tokio::task::spawn(maintain_transaction_pool_future(client, chainspec, pool, stream, executor.clone(), Default::default())); //! //! # } //! ``` @@ -355,6 +356,7 @@ Self { pool: Arc::new(PoolInner::new(validator, ordering, blob_store, config)) } }   /// Returns the wrapped pool. + #[allow(clippy::missing_const_for_fn)] pub(crate) fn inner(&self) -> &PoolInner<V, T, S> { &self.pool }
diff --git reth/crates/transaction-pool/src/maintain.rs scroll-reth/crates/transaction-pool/src/maintain.rs index 732d55d0c3fe24c9b783b7a3cda35d1dd3af0125..6d289a48cedf251c8eb1e9419db14bf194271820 100644 --- reth/crates/transaction-pool/src/maintain.rs +++ scroll-reth/crates/transaction-pool/src/maintain.rs @@ -22,7 +22,10 @@ use reth_fs_util::FsPathError; use reth_primitives_traits::{ transaction::signed::SignedTransaction, NodePrimitives, SealedHeader, }; -use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; +use reth_storage_api::{ + errors::provider::ProviderError, BaseFeeProvider, BlockReaderIdExt, StateProviderBox, + StateProviderFactory, +}; use reth_tasks::TaskSpawner; use serde::{Deserialize, Serialize}; use std::{ @@ -92,8 +95,9 @@ } }   /// Returns a spawnable future for maintaining the state of the transaction pool. -pub fn maintain_transaction_pool_future<N, Client, P, St, Tasks>( +pub fn maintain_transaction_pool_future<N, Client, BaseFee, P, St, Tasks>( client: Client, + base_fee_provider: BaseFee, pool: P, events: St, task_spawner: Tasks, @@ -106,12 +110,14 @@ + BlockReaderIdExt<Header = N::BlockHeader> + ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader>> + Clone + 'static, + BaseFee: BaseFeeProvider<StateProviderBox> + Send + 'static, P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>> + 'static, St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { async move { - maintain_transaction_pool(client, pool, events, task_spawner, config).await; + maintain_transaction_pool(client, base_fee_provider, pool, events, task_spawner, config) + .await; } .boxed() } @@ -119,8 +125,9 @@ /// Maintains the state of the transaction pool by handling new blocks and reorgs. 
/// /// This listens for any new blocks and reorgs and updates the transaction pool's state accordingly -pub async fn maintain_transaction_pool<N, Client, P, St, Tasks>( +pub async fn maintain_transaction_pool<N, Client, BaseFee, P, St, Tasks>( client: Client, + base_fee_provider: BaseFee, pool: P, mut events: St, task_spawner: Tasks, @@ -132,6 +139,7 @@ + BlockReaderIdExt<Header = N::BlockHeader> + ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader>> + Clone + 'static, + BaseFee: BaseFeeProvider<StateProviderBox> + Send + 'static, P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>> + 'static, St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, @@ -142,13 +150,13 @@ // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { let latest = SealedHeader::seal_slow(latest); let chain_spec = client.chain_spec(); + let base_fee = pool_pending_base_fee(&client, &base_fee_provider, latest.header()); + let info = BlockInfo { block_gas_limit: latest.gas_limit(), last_seen_block_hash: latest.hash(), last_seen_block_number: latest.number(), - pending_basefee: chain_spec - .next_block_base_fee(latest.header(), latest.timestamp()) - .unwrap_or_default(), + pending_basefee: base_fee, pending_blob_fee: latest .maybe_next_block_blob_fee(chain_spec.blob_params_at_timestamp(latest.timestamp())), }; @@ -324,9 +332,9 @@ let chain_spec = client.chain_spec();   // fees for the next block: `new_tip+1` - let pending_block_base_fee = chain_spec - .next_block_base_fee(new_tip.header(), new_tip.timestamp()) - .unwrap_or_default(); + let pending_block_base_fee = + pool_pending_base_fee(&client, &base_fee_provider, new_tip.header()); + let pending_block_blob_fee = new_tip.header().maybe_next_block_blob_fee( chain_spec.blob_params_at_timestamp(new_tip.timestamp()), ); @@ -427,9 +435,8 @@ let tip = blocks.tip(); let chain_spec = client.chain_spec();   // fees for the next block: `tip+1` - let pending_block_base_fee = chain_spec - .next_block_base_fee(tip.header(), tip.timestamp()) - .unwrap_or_default(); + let pending_block_base_fee = + pool_pending_base_fee(&client, &base_fee_provider, tip.header()); let pending_block_blob_fee = tip.header().maybe_next_block_blob_fee( chain_spec.blob_params_at_timestamp(tip.timestamp()), ); @@ -497,6 +504,24 @@ blob_store_tracker.add_new_chain_blocks(&blocks); } } } +} + +/// Computes the pending base fee for the pool. +fn pool_pending_base_fee< + Client: StateProviderFactory + BlockReaderIdExt, + BaseFee: BaseFeeProvider<StateProviderBox>, + H: BlockHeader, +>( + client: &Client, + base_fee_provider: &BaseFee, + parent_header: &H, +) -> u64 { + let provider = client.state_by_block_id(parent_header.number().into()); + provider + .and_then(|mut p| { + base_fee_provider.next_block_base_fee(&mut p, &parent_header, parent_header.timestamp()) + }) + .unwrap_or_else(|_| parent_header.base_fee_per_gas().unwrap_or_default()) }   struct FinalizedBlockTracker {
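Pool maintenance now asks a `BaseFeeProvider` for the next block's base fee and only falls back to the parent header's base fee if that call fails, which lets Scroll substitute its own fee rules. The sketch below mirrors just that fallback shape; the trait, the `ConstantIncrease` provider, and the error type are simplified stand-ins, not the `reth_storage_api` definitions.

```rust
/// Simplified stand-in: the real trait is generic over a state provider and
/// lives in `reth_storage_api`; only the fallback logic is mirrored here.
trait BaseFeeProvider {
    fn next_block_base_fee(&self, parent_base_fee: u64, timestamp: u64) -> Result<u64, String>;
}

/// Illustrative provider that bumps the base fee by a fixed amount.
struct ConstantIncrease(u64);

impl BaseFeeProvider for ConstantIncrease {
    fn next_block_base_fee(&self, parent_base_fee: u64, _timestamp: u64) -> Result<u64, String> {
        Ok(parent_base_fee.saturating_add(self.0))
    }
}

/// Mirrors the new helper: ask the provider, and on error fall back to the
/// parent header's base fee (or zero for pre-1559 headers).
fn pool_pending_base_fee<P: BaseFeeProvider>(
    provider: &P,
    parent_base_fee: Option<u64>,
    timestamp: u64,
) -> u64 {
    provider
        .next_block_base_fee(parent_base_fee.unwrap_or_default(), timestamp)
        .unwrap_or_else(|_| parent_base_fee.unwrap_or_default())
}

fn main() {
    let provider = ConstantIncrease(7);
    assert_eq!(pool_pending_base_fee(&provider, Some(1_000), 0), 1_007);
    assert_eq!(pool_pending_base_fee(&provider, None, 0), 7);
}
```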
diff --git reth/crates/transaction-pool/src/pool/best.rs scroll-reth/crates/transaction-pool/src/pool/best.rs index a5aa664e76498d8f32905634809ab45e434ac4d5..90cd042df69eb78638bd47d538f9330372c6fcf2 100644 --- reth/crates/transaction-pool/src/pool/best.rs +++ scroll-reth/crates/transaction-pool/src/pool/best.rs @@ -16,6 +16,8 @@ }; use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug;   +const MAX_NEW_TRANSACTIONS_PER_BATCH: usize = 16; + /// An iterator that returns transactions that can be executed on the current state (*best* /// transactions). /// @@ -165,13 +167,17 @@ /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { - while let Some(pending_tx) = self.try_recv() { - // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked - let tx_id = *pending_tx.transaction.id(); - if self.ancestor(&tx_id).is_none() { - self.independent.insert(pending_tx.clone()); + for _ in 0..MAX_NEW_TRANSACTIONS_PER_BATCH { + if let Some(pending_tx) = self.try_recv() { + // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked + let tx_id = *pending_tx.transaction.id(); + if self.ancestor(&tx_id).is_none() { + self.independent.insert(pending_tx.clone()); + } + self.all.insert(tx_id, pending_tx); + } else { + break; } - self.all.insert(tx_id, pending_tx); } } }
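`add_new_transactions` now drains at most `MAX_NEW_TRANSACTIONS_PER_BATCH` (16) newly pending transactions per call instead of looping until the channel is empty, bounding the work done while the best-transactions iterator is being consumed. A generic sketch of the bounded-drain pattern; a standard `mpsc` channel stands in for the tokio broadcast receiver used by the pool.

```rust
use std::sync::mpsc;

const MAX_PER_BATCH: usize = 16;

/// Drains at most `MAX_PER_BATCH` pending items, leaving the rest for the next
/// call so a busy sender cannot keep the receiver looping indefinitely.
fn drain_batch<T>(rx: &mpsc::Receiver<T>) -> Vec<T> {
    let mut batch = Vec::new();
    for _ in 0..MAX_PER_BATCH {
        match rx.try_recv() {
            Ok(item) => batch.push(item),
            Err(_) => break, // empty or disconnected: stop early
        }
    }
    batch
}

fn main() {
    let (tx, rx) = mpsc::channel();
    for i in 0..40 {
        tx.send(i).unwrap();
    }
    assert_eq!(drain_batch(&rx).len(), 16); // first bounded batch
    assert_eq!(drain_batch(&rx).len(), 16); // second bounded batch
    assert_eq!(drain_batch(&rx).len(), 8); // remainder
}
```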
diff --git reth/crates/transaction-pool/src/pool/mod.rs scroll-reth/crates/transaction-pool/src/pool/mod.rs index 04f0e6e0b31e0e19a9d53c0caa7ba10d41b99a63..7f528cc298c4946beee224a349f976953d6d2f5d 100644 --- reth/crates/transaction-pool/src/pool/mod.rs +++ scroll-reth/crates/transaction-pool/src/pool/mod.rs @@ -1088,6 +1088,7 @@ PendingTransactionIter { kind, iter } }   /// Returns if the transaction should be propagated. + #[allow(clippy::missing_const_for_fn)] pub(crate) fn is_propagate_allowed(&self) -> bool { self.transaction.propagate } @@ -1180,6 +1181,7 @@ } }   /// Returns the discarded transactions if there were any + #[allow(clippy::missing_const_for_fn)] pub(crate) fn discarded_transactions(&self) -> Option<&[Arc<ValidPoolTransaction<T>>]> { match self { Self::Pending(tx) => Some(&tx.discarded), @@ -1222,6 +1224,7 @@ }   /// Returns the [`TransactionId`] of the added transaction #[cfg(test)] + #[allow(clippy::missing_const_for_fn)] pub(crate) fn id(&self) -> &TransactionId { match self { Self::Pending(added) => added.transaction.id(),
diff --git reth/crates/transaction-pool/src/pool/parked.rs scroll-reth/crates/transaction-pool/src/pool/parked.rs index 43a652a1476083190e330d3dc0abc83ea9dbfea8..193442174caab148b2c013de077ffdb8af39c3a8 100644 --- reth/crates/transaction-pool/src/pool/parked.rs +++ scroll-reth/crates/transaction-pool/src/pool/parked.rs @@ -260,35 +260,33 @@ pub(crate) fn satisfy_base_fee_transactions( &self, basefee: u64, ) -> Vec<Arc<ValidPoolTransaction<T>>> { - let ids = self.satisfy_base_fee_ids(basefee as u128); - let mut txs = Vec::with_capacity(ids.len()); - for id in ids { - txs.push(self.get(&id).expect("transaction exists").transaction.clone().into()); - } + let mut txs = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + txs.push(tx.clone()); + }); txs }   /// Returns all transactions that satisfy the given basefee. - fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec<TransactionId> { - let mut transactions = Vec::new(); - { - let mut iter = self.by_id.iter().peekable(); + fn satisfy_base_fee_ids<F>(&self, basefee: u128, mut tx_handler: F) + where + F: FnMut(&Arc<ValidPoolTransaction<T>>), + { + let mut iter = self.by_id.iter().peekable();   - while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee { - // still parked -> skip descendant transactions - 'this: while let Some((peek, _)) = iter.peek() { - if peek.sender != id.sender { - break 'this - } - iter.next(); + while let Some((id, tx)) = iter.next() { + if tx.transaction.transaction.max_fee_per_gas() < basefee { + // still parked -> skip descendant transactions + 'this: while let Some((peek, _)) = iter.peek() { + if peek.sender != id.sender { + break 'this } - } else { - transactions.push(*id); + iter.next(); } + } else { + tx_handler(&tx.transaction); } } - transactions }   /// Removes all transactions from this subpool that can afford the given basefee, @@ -306,7 +304,10 @@ pub(crate) fn enforce_basefee_with<F>(&mut self, basefee: u64, mut tx_handler: F) where F: FnMut(Arc<ValidPoolTransaction<T>>), { - let to_remove = self.satisfy_base_fee_ids(basefee as u128); + let mut to_remove = Vec::new(); + self.satisfy_base_fee_ids(basefee as u128, |tx| { + to_remove.push(*tx.id()); + });   for id in to_remove { if let Some(tx) = self.remove_transaction(&id) {
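The parked-pool refactor passes a closure into `satisfy_base_fee_ids` so callers visit each satisfying transaction directly, instead of collecting `TransactionId`s and looking each one up again. A generic sketch of that collect-versus-visit trade-off, with illustrative types rather than the pool's own (the per-sender descendant skipping is omitted):

```rust
/// Collecting IDs first forces a second lookup pass; visiting with a closure
/// lets each caller decide whether to clone the value or just record its key.
#[derive(Clone)]
struct Tx {
    id: u64,
    max_fee_per_gas: u128,
}

fn visit_satisfying<F>(txs: &[Tx], basefee: u128, mut visit: F)
where
    F: FnMut(&Tx),
{
    for tx in txs {
        if tx.max_fee_per_gas >= basefee {
            visit(tx);
        }
    }
}

fn main() {
    let txs = vec![
        Tx { id: 1, max_fee_per_gas: 10 },
        Tx { id: 2, max_fee_per_gas: 50 },
        Tx { id: 3, max_fee_per_gas: 7 },
    ];

    // A caller that needs the full transactions clones inside the closure...
    let mut satisfied = Vec::new();
    visit_satisfying(&txs, 10, |tx| satisfied.push(tx.clone()));
    assert_eq!(satisfied.len(), 2);

    // ...while a caller that only needs IDs avoids cloning the payload.
    let mut ids = Vec::new();
    visit_satisfying(&txs, 10, |tx| ids.push(tx.id));
    assert_eq!(ids, vec![1, 2]);
}
```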
diff --git reth/crates/transaction-pool/src/pool/pending.rs scroll-reth/crates/transaction-pool/src/pool/pending.rs index 9bd1d092b4f5b845faaa62af4f73347c2222dac4..317066137daaaa4a40cd24491f1cfd325d7c0768 100644 --- reth/crates/transaction-pool/src/pool/pending.rs +++ scroll-reth/crates/transaction-pool/src/pool/pending.rs @@ -921,8 +921,7 @@ let removed = pool.truncate_pool(SubPoolLimit { max_txs: 10, max_size: 1000 }); assert!(removed.is_empty());   // Verify that retrieving transactions from an empty pool yields nothing - let all_txs: Vec<_> = pool.all().collect(); - assert!(all_txs.is_empty()); + assert!(pool.all().next().is_none()); }   #[test]
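The test now asserts emptiness with `.next().is_none()` rather than collecting the iterator into a `Vec`. The same idiom in isolation:

```rust
fn main() {
    let empty: Vec<u32> = Vec::new();
    // Collecting allocates a Vec just to ask whether it is empty...
    let collected: Vec<_> = empty.iter().collect();
    assert!(collected.is_empty());
    // ...while `next().is_none()` answers the same question lazily.
    assert!(empty.iter().next().is_none());
}
```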
diff --git reth/crates/transaction-pool/src/test_utils/pool.rs scroll-reth/crates/transaction-pool/src/test_utils/pool.rs index 6af440f086adb86667d2d873735e97f77bd061eb..ab7bebae2f505038fceb0d6c901be7ecfa3d490f 100644 --- reth/crates/transaction-pool/src/test_utils/pool.rs +++ scroll-reth/crates/transaction-pool/src/test_utils/pool.rs @@ -188,7 +188,7 @@ /// Send a tx with a higher nonce that what the sender has on chain HigherNonce { onchain: u64, nonce: u64 }, Multi { // Execute multiple test scenarios - scenario: Vec<Scenario>, + scenario: Vec<Self>, }, }
diff --git reth/crates/transaction-pool/src/validate/eth.rs scroll-reth/crates/transaction-pool/src/validate/eth.rs index 6d1a0147f0bf8715c05f49ec47923d4797438df8..14e91e16ca913cbfb1f3cdbf4353d17e532228c7 100644 --- reth/crates/transaction-pool/src/validate/eth.rs +++ scroll-reth/crates/transaction-pool/src/validate/eth.rs @@ -1219,11 +1219,17 @@ SpecId::SHANGHAI } else { SpecId::MERGE }; + // TODO(scroll): SpecId is starting to leak from revm to reth. Find a solution to avoid + // having to add `is_eip_7702_enabled` everywhere. + let is_eip7702_enabled = true; + let is_eip7623_enabled = true;   let gas = revm_interpreter::gas::calculate_initial_tx_gas( spec_id, transaction.input(), transaction.is_create(), + is_eip7702_enabled, + is_eip7623_enabled, transaction.access_list().map(|l| l.len()).unwrap_or_default() as u64, transaction .access_list() @@ -1673,7 +1679,7 @@ transaction.sender(), ExtendedAccount::new(transaction.nonce(), alloy_primitives::U256::ZERO), );   - // Valdiate with balance check enabled + // Validate with balance check enabled let validator = EthTransactionValidatorBuilder::new(provider.clone()) .build(InMemoryBlobStore::default());   @@ -1689,7 +1695,7 @@ } else { panic!("Expected Invalid outcome with InsufficientFunds error"); }   - // Valdiate with balance check disabled + // Validate with balance check disabled let validator = EthTransactionValidatorBuilder::new(provider) .disable_balance_check() // This should allow the transaction through despite zero balance .build(InMemoryBlobStore::default());
diff --git reth/deny.toml scroll-reth/deny.toml index fd2eb5c11cdf24e638c0e0470a571b26f9d8500c..82bacf4df6d3095f9ad49927f152fd752a421146 100644 --- reth/deny.toml +++ scroll-reth/deny.toml @@ -20,7 +20,7 @@ # Lint level for when a crate version requirement is `*` wildcards = "allow" highlight = "all" # List of crates to deny -deny = [{ name = "openssl" }] +# TODO issue #201 deny = [{ name = "openssl" }] # Certain crates/versions that will be skipped when doing duplicate detection. skip = [] # Similarly to `skip` allows you to skip certain crates during duplicate @@ -62,8 +62,23 @@ exceptions = [ # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in https://github.com/paradigmxyz/reth/pull/6980 { allow = ["MPL-2.0"], name = "option-ext" }, - { allow = ["MPL-2.0"], name = "webpki-root-certs" }, +] + +# Skip the poseidon-bn254, bn254 and zktrie crates for license verification. We should at some point publish a license for them. +[licenses.private] +ignore = true +ignore-sources = [ + "https://github.com/scroll-tech/poseidon-bn254", + "https://github.com/scroll-tech/bn254", + "https://github.com/scroll-tech/zktrie.git", + "https://github.com/scroll-tech/scroll-revm.git", + "https://github.com/scroll-tech/da-codec.git", ] + +[[licenses.clarify]] +name = "ring" +expression = "LicenseRef-ring" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }]   [[licenses.clarify]] name = "rustls-webpki" @@ -89,4 +104,10 @@ "https://github.com/paradigmxyz/revm-inspectors", "https://github.com/alloy-rs/evm", "https://github.com/alloy-rs/hardforks", "https://github.com/paradigmxyz/jsonrpsee", + "https://github.com/scroll-tech/bn254", + "https://github.com/scroll-tech/sp1-intrinsics", + "https://github.com/scroll-tech/poseidon-bn254", + "https://github.com/scroll-tech/scroll-revm.git", + "https://github.com/scroll-tech/revm.git", + "https://github.com/scroll-tech/da-codec.git", ]
diff --git reth/docs/vocs/docs/pages/cli/reth.mdx scroll-reth/docs/vocs/docs/pages/cli/reth.mdx index 9a32d6478766ef47abed898a1491784d57b93759..5f0ccfca01f77e335260dfd6c4cdac6b94be946b 100644 --- reth/docs/vocs/docs/pages/cli/reth.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth.mdx @@ -113,4 +113,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/config.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/config.mdx index b449f118168d3725a297ae8cdc3b7ca1e8776a66..849f4ec5bab7cfb20b1cb6d90eebedce93829215 100644 --- reth/docs/vocs/docs/pages/cli/reth/config.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/config.mdx @@ -99,4 +99,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db.mdx index 2553a1480f9813912c0e58c513ddb163dc0f513f..3b28b43162a03254439087e0b74835f9ec95faf0 100644 --- reth/docs/vocs/docs/pages/cli/reth/db.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db.mdx @@ -164,4 +164,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/checksum.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index ba12fd1b2f55fd86c3932fe85fc1f7864e021735..13e2c2bd39d3d0048c82f3164d1834ff5334a97f 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -116,4 +116,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/clear.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 79e324021bfcdf0c5b50e018a81a24dcf0a1deb4..5c19682e8b62afacae19c9bb982825b48d94d8b2 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -108,4 +108,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 843f5253c9a96633351c8d6215f0b5af8119c537..0e5526affe5088c9adec08a34a724e6e3e9c1ea9 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -107,4 +107,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 3af272ff36294756be163b25100396911b57757b..72c3108fcf3fca829efde337ec4e43494513bdf8 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -110,4 +110,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/diff.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/diff.mdx index f440545f1298612f0aae83bbc1f5dabc06cf1f0d..fadd0613ca84566b1dd920bf61e4074c65665f3d 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -143,4 +143,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/drop.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 64552318a21cd71147f743af402f94ae1bc555be..0f9ddba9ee92c09595130ab0136adf0766c77773 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -106,4 +106,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/get.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/get.mdx index c7fc831b764daf3666c79e96eaed18ff7448e62c..942eda79998d4591548c0e7db1fcfe578158aa25 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -108,4 +108,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 48fd6c889c6e01dad7739e59142300a93239a20e..b7ccf9e7d3da7622173e7518932b30034bbc940b 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -116,4 +116,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index af21819a452c25c9a96b3f7b0ecaf2b16eadc55d..28d7c343e94fe9abf4ee997449d97289b1a58f3f 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -116,4 +116,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/list.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/list.mdx index cff6c7eed5e3883bd87d4fb9ceb7ea8083035e0d..3f9ac94c5c5fb0b4eec3610d80196b6675b9c9d4 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -149,4 +149,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/path.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/path.mdx index 1dd3279a79783c9dfa6410192234367fa906be7a..f6714898b35758056a9d6e77f600eda58705c146 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -103,4 +103,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index f50582651962f1651b8094f8a0019f18f0999c61..3a6bfae1d3c1f85035a48272091dcce4831e1128 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -106,4 +106,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/stats.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 1f2c50908dccd2af0b20552d7d2cd609def77bf7..a4939c3ef93dccedf5744438d8c039a06f9df1fc 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -116,4 +116,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/db/version.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/db/version.mdx index a683749fcdf83b2eaaf585bd466f5b72bcd085b9..7b3766b4e8ada332ad259870b06cf5953507d5f8 100644 --- reth/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -103,4 +103,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/download.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/download.mdx index 973dce74a2284f1ff2881e8e8c9954fd37c10b94..7429653885557cfd57eab05ca230d011cb2a7b52 100644 --- reth/docs/vocs/docs/pages/cli/reth/download.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/download.mdx @@ -161,4 +161,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 6bc27381a24beb792a50cbe18ff5437bf461ad7a..a6dbbcb1b27fb7ec40bf26fe5431fdea11778cde 100644 --- reth/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -102,4 +102,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/export-era.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/export-era.mdx index 896f7f34d085573e7109859671a564296d046017..ee65abbeb42f8f22fdcdb5c4e7b50ea706e425dd 100644 --- reth/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -167,4 +167,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/import-era.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/import-era.mdx index a783067d1936633e37307c9ac3427e8a8ac172ef..ae17ab91e0eb6e28cfc4aa2e916e2f33e8a8b35a 100644 --- reth/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -162,4 +162,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/import.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/import.mdx index 0914444e1085e64591f47ac2733faa494eb9d740..f92b52ec5911835805166e7e30be62bc3af3f393 100644 --- reth/docs/vocs/docs/pages/cli/reth/import.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/import.mdx @@ -163,4 +163,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/init-state.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/init-state.mdx index 8c0cfa6e4d3157e6dab21daa36b0a692e0dbd7d2..03d1e7b883bcf53615a188c76808335674a14236 100644 --- reth/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,4 +186,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/init.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/init.mdx index b1ac27e8ba713b095ca5ec144f0674a71a6c21e1..993ae2dcd8577998d3f72e8afcccd90fe32e5a66 100644 --- reth/docs/vocs/docs/pages/cli/reth/init.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/init.mdx @@ -151,4 +151,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/node.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/node.mdx index 2021b342d62be265a15e0d9c91db17a785c3c84c..8ac6156c664c8c88e7a7bff965152322878cfbbd 100644 --- reth/docs/vocs/docs/pages/cli/reth/node.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/node.mdx @@ -39,7 +39,7 @@ -h, --help Print help (see a summary with '-h')   Metrics: - --metrics <SOCKET> + --metrics <PROMETHEUS> Enable Prometheus metrics.   The metrics will be served at the given interface and port. @@ -247,6 +247,9 @@ [default: sqrt]   --required-block-hashes <REQUIRED_BLOCK_HASHES> Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + + --network-id <NETWORK_ID> + Optional network ID to override the chain specification's network ID for P2P connections   RPC: --http @@ -619,10 +622,10 @@ Interval is specified in seconds or in milliseconds if the value ends with `ms`: * `50ms` -> 50 milliseconds * `1` -> 1 second   [default: 1]   - --builder.deadline <SECONDS> + --builder.deadline <DEADLINE> The deadline for when the payload builder job should resolve   - [default: 12] + [default: 12s]   --builder.max-tasks <MAX_PAYLOAD_TASKS> Maximum number of tasks to spawn for building a payload @@ -864,6 +867,12 @@ --engine.allow-unwind-canonical-header Allow unwinding canonical header to ancestor during forkchoice updates. See `TreeConfig::unwind_canonical_header` for more details   + --engine.storage-worker-count <STORAGE_WORKER_COUNT> + Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + + --engine.account-worker-count <ACCOUNT_WORKER_COUNT> + Configure the number of account proof workers in the Tokio blocking pool. If not specified, defaults to the same count as storage workers + ERA: --era.enable Enable import from ERA1 files @@ -984,4 +993,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p.mdx index 6b24d9d326bcef598ad89fd0f823aa5720572655..9693e20e756026749574f5225c8f5523d4d980fd 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -100,4 +100,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p/body.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index ecd6ccf81411ce5341d739353bba46c6e0f1f04c..ae0f3d293d10a049a64ab38faf2e7fc848f5b1de 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -206,6 +206,9 @@ --required-block-hashes <REQUIRED_BLOCK_HASHES> Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out   + --network-id <NETWORK_ID> + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir <DATA_DIR> The path to the data dir for all reth files and subdirectories. @@ -317,4 +320,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 2a0a5b6a8081b68f71c308684e6c5b12e1a8e193..d1bf7c69870fedd8c8c8387fdb16d7bfd118c00b 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -111,4 +111,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p/header.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index fee957e3385071833f92a1cac8a4d4baef33aa2c..9e542916d4cbf5465dd767fc8a08b5f28e830cc4 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -206,6 +206,9 @@ --required-block-hashes <REQUIRED_BLOCK_HASHES> Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out   + --network-id <NETWORK_ID> + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir <DATA_DIR> The path to the data dir for all reth files and subdirectories. @@ -317,4 +320,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index dbd7ca91b341c5480b274d58745dcc992ea17f72..75ab654964f58ccdd4a65cfde1fc52dab0218229 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -97,4 +97,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index ac123d472852d3ca0fbe36f83fec85cd43ed5da3..7152b222fb4c48fead1ae735a2dd4210c9b43528 100644 --- reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -97,4 +97,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/prune.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/prune.mdx index ce6bc399d8ee79e501d9ef753a45939345764a3b..f54f668780577a52f41c35684a05a92de35a04f2 100644 --- reth/docs/vocs/docs/pages/cli/reth/prune.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -151,4 +151,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/re-execute.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/re-execute.mdx index ec5e048b5cd654734d84300ecaf40af606af15b2..973ac79f29fb48ae4a6d6893913ec66525482546 100644 --- reth/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -164,4 +164,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage.mdx index bc693f7e463a6e5d52db7291b13d94282fa18458..f382eb2081ecfaca7d09cf959a07d6269fad5059 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -100,4 +100,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/drop.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index a36545638cec07cca1dc0237b66a3a8b42aef2ba..e2ba5751b522a351a6ecee91b8e4c21c08f7e6f7 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -165,4 +165,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/dump.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 97211934295f71d905ac80ec7c2b994b3b2cf857..01b4f61f29fc0341d8290a0bd72e1fb39b0ae4ea 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -158,4 +158,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index c1459ee5498c74bff0d5a1a1b6a9578a8bad50ed..18f44ae13ed66b32d29bd294e70fb3a6c766f959 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -115,4 +115,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 4f39dccac120890c72177eb0ef8262262c8923da..de0f693ed578b4c9edbe36c0b29abaa3db791903 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -115,4 +115,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index f5d6a07b09a4a21a1b9329c9e8785aa6252492d6..aaff755796a42f4d650613dc39817320a5a9c136 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -115,4 +115,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index fce03ffa753d3b3bea774e66f850a97e30734c3c..2ff7b22b76bd6c6790faf4f9c74651a1a358fe39 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -115,4 +115,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/run.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 76ce30a2f79d302a8720d624860bac87aaeb918e..2af69a053d642a5d87fbabbf9fd4451192318cc2 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -302,6 +302,9 @@ --required-block-hashes <REQUIRED_BLOCK_HASHES> Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out   + --network-id <NETWORK_ID> + Optional network ID to override the chain specification's network ID for P2P connections + Logging: --log.stdout.format <FORMAT> The format to use for logs written to stdout @@ -383,4 +386,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 1a3fd02cae82b3daa201afcb7883db7a7852671f..977d949a9b7f343ecea514b8927a5c90a6767c47 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -159,4 +159,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index bed98899e195a3fce1bef8b1d8ea9a175f07bfb9..0b60467c413849b8141bd08e51fec55065884a60 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -107,4 +107,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index bcfc87cf3e573d7baf9cdd6c10542d3517be124d..07632cf8285568a28930567728af25da50ee339a 100644 --- reth/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ scroll-reth/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -107,4 +107,21 @@ -vvvvv Traces (warning: very verbose!)   -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=<URL>] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level <LEVEL> + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file
diff --git reth/docs/vocs/docs/pages/run/monitoring.mdx scroll-reth/docs/vocs/docs/pages/run/monitoring.mdx index 30ce967bb10e3ffa2c7aa8774f19b0fecc02392d..d6c734360984e7a32b890f433a0ed2a5f0882089 100644 --- reth/docs/vocs/docs/pages/run/monitoring.mdx +++ scroll-reth/docs/vocs/docs/pages/run/monitoring.mdx @@ -10,6 +10,12 @@ ```bash reth node --metrics 127.0.0.1:9001 ```   +Alternatively, you can export metrics to an OpenTelemetry collector using `--otlp-metrics`: + +```bash +reth node --otlp-metrics 127.0.0.1:4318 +``` + Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time:   ```bash
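The monitoring page now documents `--otlp-metrics` alongside the existing `--metrics` flag. A hedged sketch combining the two, assuming they can be enabled side by side (the diff does not say whether they are mutually exclusive); `127.0.0.1:4318` is the conventional OTLP/HTTP port and stands in for whatever collector is actually running:

```bash
# Sketch only: serve Prometheus-format metrics locally and, in addition, push
# metrics to an OTLP collector listening on the standard OTLP/HTTP port.
reth node \
    --metrics 127.0.0.1:9001 \
    --otlp-metrics 127.0.0.1:4318
```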
diff --git reth/etc/grafana/dashboards/overview.json scroll-reth/etc/grafana/dashboards/overview.json index 5b271d7ea8e6b9f54e63bb7660d6cf88ad3c99e6..46a465ca4a412f9162f2101c1bd5008dcb7f284c 100644 --- reth/etc/grafana/dashboards/overview.json +++ scroll-reth/etc/grafana/dashboards/overview.json @@ -3931,7 +3931,7 @@ "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "Precompile cache hits", + "legendFormat": "{{address}}", "range": true, "refId": "A", "useBackend": false
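The `legendFormat` change above switches the precompile-cache panel from a single "Precompile cache hits" series to a per-address breakdown, which implies the underlying metric is labelled by precompile address. A rough, assumption-laden way to confirm that label on a running node (the exact metric name is not spelled out in the diff, so the grep pattern is a guess):

```bash
# Scrape the node's Prometheus endpoint (as configured via --metrics above) and
# look for precompile-cache series carrying an address="0x..." label.
curl -s 127.0.0.1:9001 | grep -i 'precompile'
```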
diff --git reth/etc/grafana/scroll/overview.json scroll-reth/etc/grafana/scroll/overview.json new file mode 100644 index 0000000000000000000000000000000000000000..870c83bcabe988d4c073f421ae0c95df0d42a562 --- /dev/null +++ scroll-reth/etc/grafana/scroll/overview.json @@ -0,0 +1,11919 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + }, + { + "name": "DS_EXPRESSION", + "label": "Expression", + "description": "", + "type": "datasource", + "pluginId": "__expr__" + }, + { + "name": "VAR_INSTANCE_LABEL", + "type": "constant", + "label": "Instance Label", + "value": "job", + "description": "" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "datasource", + "id": "__expr__", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "12.1.0-pre" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 96, + "panels": [], + "repeat": "instance", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + 
"h": 3, + "w": 4, + "x": 16, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "dark-red", + "value": 0 + }, + { + "color": "semi-dark-orange", + "value": 10 + }, + { + "color": "semi-dark-yellow", + "value": 20 + }, + { + "color": "semi-dark-green", + "value": 30 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 4 + }, + "id": 194, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_network_connected_peers{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Connected peers", + "transparent": true, + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The checkpoints mark the last block a stage can recover from in the case of a crash or shutdown of the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 4 + }, + "id": 20, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_sync_checkpoint{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{stage}}", + "range": false, + "refId": "A" + } + ], + "title": "Stage checkpoints", + "transformations": [ + { + "id": "joinByField", + "options": { + "mode": "outer" + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "includeByName": {}, + "indexByName": { + "AccountHashing": 6, + "Bodies": 1, + "Execution": 3, + "Finish": 13, + "Headers": 0, + "IndexAccountHistory": 11, + "IndexStorageHistory": 10, + "MerkleExecute": 8, + "MerkleUnwind": 5, + "Prune": 12, + "PruneSenderRecovery": 4, + "SenderRecovery": 2, + "StorageHashing": 7, + "Time": 14, + "TransactionLookup": 9 + }, + "renameByName": {} + } + } + ], + "transparent": true, + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 4 + }, + "id": 218, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\"})", + "legendFormat": "Database", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_freelist{service=\"$service\", namespace=\"$env\"} * reth_db_page_size{service=\"$service\", namespace=\"$env\"})", + "hide": false, + "instant": false, + "legendFormat": "Freelist", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\"})", + "hide": false, + "instant": false, + "legendFormat": "Static Files", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\"}) + sum(reth_db_freelist{service=\"$service\", namespace=\"$env\"} * reth_db_page_size{service=\"$service\", namespace=\"$env\"}) + 
sum(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\"})", + "hide": false, + "instant": false, + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "Storage size", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 69, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_sync_entities_processed{service=\"$service\", namespace=\"$env\"} / reth_sync_entities_total{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "{{stage}}", + "range": true, + "refId": "A" + } + ], + "title": "Sync progress (stage progress in %)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_sync_checkpoint{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "{{stage}}", + "range": true, + "refId": "A" + } + ], + "title": "Sync progress (stage progress as highest block 
number reached)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 211, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 min", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p50", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p95", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_engine_rpc_fork_choice_updated_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV1 p99", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 min", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p50", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p90", + "range": true, + "refId": "H", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p95", + "range": true, + "refId": "I", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV2 p99", + "range": true, + "refId": "J", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 min", + "range": true, + "refId": "K", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p50", + "range": true, + "refId": "L", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + 
"includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p90", + "range": true, + "refId": "M", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p95", + "range": true, + "refId": "N", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_fork_choice_updated_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "range": true, + "refId": "O", + "useBackend": false + } + ], + "title": "Engine API forkchoiceUpdated Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Latency histogram for the engine_newPayload RPC API", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 min", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p50", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p90", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p95", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV1 p99", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 min", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p50", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p90", + "range": true, + "refId": "H", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p95", + "range": true, + "refId": "I", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v2{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV2 p99", + "range": true, + "refId": "J", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + 
"legendFormat": "engine_newPayloadV3 min", + "range": true, + "refId": "K", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p50", + "range": true, + "refId": "L", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p90", + "range": true, + "refId": "M", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p95", + "range": true, + "refId": "N", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v3{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV3 p99", + "range": true, + "refId": "O", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 min", + "range": true, + "refId": "P", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p50", + "range": true, + "refId": "Q", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p90", + "range": true, + "refId": "R", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_v4{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p95", + "range": true, + "refId": "S", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_engine_rpc_new_payload_v4{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_newPayloadV4 p99", + "range": true, + "refId": "T", + "useBackend": false + } + ], + "title": "Engine API newPayload Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The metric is the amount of gas processed in a block", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "sishort" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 1004, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "legendFormat": "p50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "hide": false, + "legendFormat": "p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "hide": false, + "legendFormat": "p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_total_gas{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "hide": false, + "legendFormat": "p99", + "range": true, + "refId": "D" + } + ], + "title": "Engine API newPayload Total Gas", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The throughput of the Engine API newPayload method. 
The metric is the amount of gas processed in a block, divided by the time it took to process the newPayload request.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "si: gas/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 1003, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "legendFormat": "p50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "hide": false, + "legendFormat": "p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "hide": false, + "legendFormat": "p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_gas_per_second{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "hide": false, + "legendFormat": "p99", + "range": true, + "refId": "D" + } + ], + "title": "Engine API newPayload Throughput", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The throughput of the node's executor. 
The metric is the amount of gas processed in a block, divided by the time it took to process the block.\n\nNote: For mainnet, the block range 2,383,397-2,620,384 will be slow because of the 2016 DoS attack.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "si: gas/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Gas/s", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[1m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1m)", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[5m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (5m)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[10m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (10m)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[30m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (30m)", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[1h])", + "fullMetaSearch": false, + 
"hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1h)", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{service=\"$service\", namespace=\"$env\"}[24h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (24h)", + "range": true, + "refId": "G", + "useBackend": false + } + ], + "title": "Execution throughput", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 240, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_block_validation_state_root_duration{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_execution_execution_duration{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution Duration", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Block Processing Latency", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 87, + "panels": [], + "repeat": "instance", + "title": "Engine API", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + 
"legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 84, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_forkchoice_updated_messages{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "forkchoiceUpdated", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "newPayload", + "range": true, + "refId": "B" + } + ], + "title": "Engine API messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Counts the number of failed response deliveries due to client request termination.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 249, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "newPayload", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": 
"forkchoiceUpdated", + "range": true, + "refId": "B" + } + ], + "title": "Failed Engine API Response Deliveries", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Latency histogram for the engine_newPayload to engine_forkchoiceUpdated", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 54 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "p{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Engine API latency between forkchoiceUpdated and newPayload", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 54 + }, + "id": 212, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_engine_rpc_get_payload_bodies_by_hash_v1{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 min", + "range": true, + "refId": "O", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p50", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p90", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p95", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_hash_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByHashV1 p99", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByRangeV1 min", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByRangeV1 p50", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.9\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByRangeV1 p90", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_engine_rpc_get_payload_bodies_by_range_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByRangeV1 p95", + "range": true, + "refId": "H", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_payload_bodies_by_range_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getPayloadBodiesByRangeV1 p99", + "range": true, + "refId": "I", + "useBackend": false + } + ], + "title": "Engine API getPayloadBodies Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 62 + }, + "id": 1000, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_engine_rpc_blobs_blob_count{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "Found", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_engine_rpc_blobs_blob_misses{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Missed", + "range": true, + "refId": "B" + } + ], + "title": "Blob Count and Misses", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 62 + }, + "id": 258, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.5\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 p50", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.95\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 p95", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{service=\"$service\", namespace=\"$env\", quantile=\"0.99\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 p99", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{service=\"$service\", namespace=\"$env\", quantile=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 min", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_engine_rpc_get_blobs_v1{service=\"$service\", namespace=\"$env\", quantile=\"1\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "engine_getBlobsV1 max", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Engine API getBlobs Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Total pipeline runs triggered by the sync controller", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + 
{ + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 70 + }, + "id": 85, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_consensus_engine_beacon_pipeline_runs{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Pipeline runs", + "range": true, + "refId": "A" + } + ], + "title": "Pipeline runs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 70 + }, + "id": 83, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_consensus_engine_beacon_active_block_downloads{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Active block downloads", + "range": true, + "refId": "A" + } + ], + "title": "Active block downloads", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 78 + }, + "id": 46, + "panels": [], + "repeat": "instance", + "title": "Execution", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 24, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": 
[] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 79 + }, + "id": 1001, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_block_validation_state_root_duration{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_execution_execution_duration{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Execution Duration", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Block Processing Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 79 + }, + "id": 251, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_account_cache_hits{service=\"$service\", namespace=\"$env\"} / (reth_sync_caching_account_cache_hits{service=\"$service\", namespace=\"$env\"} + reth_sync_caching_account_cache_misses{service=\"$service\", namespace=\"$env\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Account cache hits", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_storage_cache_hits{service=\"$service\", namespace=\"$env\"} / (reth_sync_caching_storage_cache_hits{service=\"$service\", namespace=\"$env\"} + reth_sync_caching_storage_cache_misses{service=\"$service\", namespace=\"$env\"})", + "fullMetaSearch": false, + "hide": false, + 
"includeNullMetadata": true, + "instant": false, + "legendFormat": "Storage cache hits", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_code_cache_hits{service=\"$service\", namespace=\"$env\"} / (reth_sync_caching_code_cache_hits{service=\"$service\", namespace=\"$env\"} + reth_sync_caching_code_cache_misses{service=\"$service\", namespace=\"$env\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Code cache hits", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Execution cache hitrate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The time it takes for operations that are part of block validation, but not execution or state root, to complete.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 87 + }, + "id": 252, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_block_validation_trie_input_duration{service=\"$service\", namespace=\"$env\", quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Trie input creation duration p{{quantile}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Block validation overhead", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "max": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "Precompile cache hits" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 87 + }, + "id": 1005, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_sync_caching_precompile_cache_hits{service=\"$service\", namespace=\"$env\"} / (reth_sync_caching_precompile_cache_hits{service=\"$service\", namespace=\"$env\"} + reth_sync_caching_precompile_cache_misses{service=\"$service\", namespace=\"$env\"})", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{address}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Precompile cache hitrate", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 95 + }, + "id": 214, + "panels": [], + "title": "State Root Task", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 96 + }, + "id": 255, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_tree_root_proofs_processed_histogram{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Proofs Processed", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, 
+ "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 96 + }, + "id": 254, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_tree_root_proof_calculation_duration_histogram{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Proof calculation duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 104 + }, + "id": 257, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_multiproofs_histogram{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Pending MultiProof requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 104 + }, + "id": 256, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_tree_root_inflight_multiproofs_histogram{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "{{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "In-flight MultiProof requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 112 + }, + "id": 260, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Account {{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Total multiproof account nodes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + 
"barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 112 + }, + "id": 259, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Storage {{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Total multiproof storage nodes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 120 + }, + "id": 262, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "hide": false, + "instant": false, + "legendFormat": "Account {{quantile}} percentile", + "range": true, + "refId": "A" + } + ], + "title": "Redundant multiproof account nodes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 120 + }, + "id": 261, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "instant": false, + "legendFormat": "Storage {{quantile}} percentile", + "range": true, + "refId": "Branch Nodes" + } + ], + "title": "Redundant multiproof storage nodes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "How much time is spent in the multiproof task", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 128 + }, + "id": 263, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_tree_root_multiproof_task_total_duration_histogram{service=\"$service\", namespace=\"$env\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "hide": false, + "instant": false, + "legendFormat": "Task duration {{quantile}} percentile", + "range": true, + "refId": "A" + } + ], + "title": "Proof fetching total duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Histogram for state root latency, the time spent blocked waiting for the state root.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 128 + }, + "id": 1006, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_block_validation_state_root_histogram{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration p{{quantile}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "State root latency", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 136 + }, + "id": 38, + "panels": [], + "repeat": "instance", + "title": "Database", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The average commit time for database transactions. 
Generally, this should not be a limiting factor in syncing.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 137 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(rate(reth_database_transaction_close_duration_seconds_sum{service=\"$service\", namespace=\"$env\", outcome=\"commit\"}[$__rate_interval]) / rate(reth_database_transaction_close_duration_seconds_count{service=\"$service\", namespace=\"$env\", outcome=\"commit\"}[$__rate_interval]) >= 0)", + "format": "time_series", + "instant": false, + "legendFormat": "Commit time", + "range": true, + "refId": "A" + } + ], + "title": "Average commit time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 137 + }, + "id": 42, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Commit time" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(max_over_time(reth_database_transaction_close_duration_seconds{service=\"$service\", namespace=\"$env\", outcome=\"commit\"}[$__rate_interval])) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Commit time heatmap", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + 
"description": "The average time a database transaction was open.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 145 + }, + "id": 117, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(reth_database_transaction_open_duration_seconds_sum{service=\"$service\", namespace=\"$env\", outcome!=\"\"}[$__rate_interval]) / rate(reth_database_transaction_open_duration_seconds_count{service=\"$service\", namespace=\"$env\", outcome!=\"\"}[$__rate_interval])) by (outcome, mode)", + "format": "time_series", + "instant": false, + "legendFormat": "{{mode}}, {{outcome}}", + "range": true, + "refId": "A" + } + ], + "title": "Average transaction open time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The maximum time the database transaction was open.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 145 + }, + "id": 116, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(max_over_time(reth_database_transaction_open_duration_seconds{service=\"$service\", namespace=\"$env\", outcome!=\"\", quantile=\"1\"}[$__interval])) by (outcome, mode)", 
+ "format": "time_series", + "instant": false, + "legendFormat": "{{mode}}, {{outcome}}", + "range": true, + "refId": "A" + } + ], + "title": "Max transaction open time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "txs", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "Diff(opened-closed)" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [ + 0, + 10 + ], + "fill": "dot" + } + }, + { + "id": "custom.axisLabel", + "value": "diff" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 153 + }, + "id": 119, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum(reth_database_transaction_opened_total{service=\"$service\", namespace=\"$env\", mode=\"read-write\"})", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Opened", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(reth_database_transaction_closed_total{service=\"$service\", namespace=\"$env\", mode=\"read-write\"})", + "format": "time_series", + "instant": false, + "legendFormat": "Closed {{mode}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "expression": "${A} - ${B}", + "hide": false, + "refId": "Diff(opened-closed)", + "type": "math" + } + ], + "title": "Number of read-write transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "txs", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + 
"fill": "solid" + }, + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "Diff(opened, closed)" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [ + 0, + 10 + ], + "fill": "dot" + } + }, + { + "id": "custom.axisLabel", + "value": "diff" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 153 + }, + "id": 250, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_database_transaction_opened_total{service=\"$service\", namespace=\"$env\", mode=\"read-only\"}", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Opened", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum(reth_database_transaction_closed_total{service=\"$service\", namespace=\"$env\", mode=\"read-only\"})", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Closed {{mode}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "expression": "${A} - ${B}", + "hide": false, + "refId": "Diff(opened, closed)", + "type": "math" + } + ], + "title": "Number of read-only transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The size of tables in the database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 161 + }, + "id": 48, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_db_table_size{service=\"$service\", namespace=\"$env\"}", + "interval": "", + "legendFormat": "{{table}}", + "range": true, + "refId": "A" + } + ], + "title": "Database tables", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": 
"The maximum time the database transaction operation which inserts a large value took.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 161 + }, + "id": 118, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(max_over_time(reth_database_operation_large_value_duration_seconds{service=\"$service\", namespace=\"$env\", quantile=\"1\"}[$__interval]) > 0) by (table)", + "format": "time_series", + "instant": false, + "legendFormat": "{{table}}", + "range": true, + "refId": "A" + } + ], + "title": "Max insertion operation time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The type of the pages in the database:\n\n- **Leaf** pages contain KV pairs.\n- **Branch** pages contain information about keys in the leaf pages\n- **Overflow** pages store large values and should generally be avoided if possible", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 169 + }, + "id": 50, + "options": { + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "sum by (type) ( reth_db_table_pages{service=\"$service\", namespace=\"$env\"} )", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Database pages", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The size of the database over time", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", 
+ "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 4, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 169 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum by (job) ( reth_db_table_size{service=\"$service\", namespace=\"$env\"} )", + "legendFormat": "Size ({{job}})", + "range": true, + "refId": "A" + } + ], + "title": "Database growth", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of pages on the MDBX freelist", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 177 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_freelist{service=\"$service\", namespace=\"$env\"}) by (job)", + "legendFormat": "Pages ({{job}})", + "range": true, + "refId": "A" + } + ], + "title": "Freelist", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + 
"id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "type" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Overflow pages" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "table" + }, + "properties": [ + { + "id": "displayName", + "value": "Table" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 177 + }, + "id": 58, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sort_desc(reth_db_table_pages{service=\"$service\", namespace=\"$env\", type=\"overflow\"} != 0)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Overflow pages by table", + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 185 + }, + "id": 203, + "panels": [], + "title": "Static Files", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The size of segments in the static files", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 186 + }, + "id": 202, + "options": { + "displayLabels": [ + "name" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_static_files_segment_size{service=\"$service\", namespace=\"$env\"}", + "interval": "", + "legendFormat": "{{segment}}", + "range": true, + "refId": "A" + } + ], + "title": "Segments size", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Entries" + } + ] + }, + { + "matcher": { + 
"id": "byName", + "options": "segment" + }, + "properties": [ + { + "id": "displayName", + "value": "Segment" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 186 + }, + "id": 204, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "reth_static_files_segment_entries{service=\"$service\", namespace=\"$env\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Entries per segment", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "unit", + "value": "locale" + }, + { + "id": "displayName", + "value": "Files" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "segment" + }, + "properties": [ + { + "id": "displayName", + "value": "Segment" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "instance" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "job" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "__name__" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 186 + }, + "id": 205, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "reth_static_files_segment_files{service=\"$service\", namespace=\"$env\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Files per segment", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The size of the static files over time", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": 
{ + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 194 + }, + "id": 206, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum by (job) ( reth_static_files_segment_size{service=\"$service\", namespace=\"$env\"} )", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Static Files growth", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The maximum time the static files operation which commits a writer took.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 194 + }, + "id": 207, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{service=\"$service\", namespace=\"$env\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", + "legendFormat": "{{segment}}", + "range": true, + "refId": "A" + } + ], + "title": "Max writer commit time", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 202 + }, + "id": 79, + "panels": [], + "repeat": "instance", + "title": "Blockchain Tree", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "o59qe-zVz" + }, + "description": "The block number of the tip of the canonical chain from the blockchain tree.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 203 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_blockchain_tree_canonical_chain_height{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Canonical chain height", + "range": true, + "refId": "B" + } + ], + "title": "Canonical chain height", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Total number of blocks in the tree's block buffer", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 203 + }, + "id": 80, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_blockchain_tree_block_buffer_blocks{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Buffered blocks", + "range": true, + "refId": "B" + } + ], + "title": "Block buffer blocks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 211 + }, + "id": 1002, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "increase(reth_blockchain_tree_reorgs{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Reorgs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 211 + }, + "id": 190, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_blockchain_tree_latest_reorg_depth{service=\"$service\", namespace=\"$env\"}", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Latest Reorg Depth", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 219 + }, + "id": 108, + "panels": [], + "title": "RPC server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": 
"", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 220 + }, + "id": 109, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum(reth_rpc_server_connections_connections_opened_total{service=\"$service\", namespace=\"$env\"} - reth_rpc_server_connections_connections_closed_total{service=\"$service\", namespace=\"$env\"}) by (transport)", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{transport}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Active Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 220 + }, + "id": 111, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Latency time" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(max_over_time(reth_rpc_server_connections_request_time_seconds{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) > 0) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Request Latency time", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 228 + }, + "id": 120, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{service=\"$service\", namespace=\"$env\"}[$__rate_interval])) by (method) > 0", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Maximum call latency per method", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 228 + }, + "id": 112, + "maxDataPoints": 25, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "s" + }, + "color": { + "exponent": 0.2, + "fill": "dark-orange", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-09 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Latency time" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisLabel": "Quantile", + "axisPlacement": "left", + "reverse": false, + "unit": "percentunit" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(max_over_time(reth_rpc_server_calls_time_seconds{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) > 0) by (quantile)", + "format": "time_series", + "instant": false, + "legendFormat": "{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Call Latency time", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 
0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/.*cached items.*/" + }, + "properties": [ + { + "id": "custom.axisLabel", + "value": "Items" + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": "/.*consumers.*/" + }, + "properties": [ + { + "id": "custom.axisLabel", + "value": "Queued consumers" + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": "/.memory usage*/" + }, + "properties": [ + { + "id": "unit", + "value": "decbytes" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 236 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_cached_count{service=\"$service\", namespace=\"$env\", cache=\"headers\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Headers cache cached items", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{service=\"$service\", namespace=\"$env\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache queued consumers", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{service=\"$service\", namespace=\"$env\", cache=\"headers\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Headers cache queued consumers", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_queued_consumers_count{service=\"$service\", namespace=\"$env\", cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Block cache queued consumers", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_memory_usage{service=\"$service\", namespace=\"$env\", cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Blocks 
cache memory usage", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_cached_count{service=\"$service\", namespace=\"$env\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache cached items", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_memory_usage{service=\"$service\", namespace=\"$env\", cache=\"receipts\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receipts cache memory usage", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_rpc_eth_cache_cached_count{service=\"$service\", namespace=\"$env\", cache=\"blocks\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Block cache cached items", + "range": true, + "refId": "H", + "useBackend": false + } + ], + "title": "RPC Cache Metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 236 + }, + "id": 246, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(rate(reth_rpc_server_calls_successful_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])) by (method) > 0", + "instant": false, + "legendFormat": "{{method}}", + "range": true, + "refId": "A" + } + ], + "title": "RPC Throughput", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 244 + }, + "id": 24, + "panels": [], + "repeat": "instance", + "title": "Downloader: Headers", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + 
"axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "D" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 245 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_headers_total_downloaded{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Downloaded", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_headers_total_flushed{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Flushed", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_total_downloaded{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Downloaded/s", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_total_flushed{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Flushed/s", + "range": true, + "refId": "D" + } + ], + "title": "I/O", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Internal errors in the header downloader. 
These are expected to happen from time to time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 245 + }, + "id": 33, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_timeout_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "Request timed out", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_unexpected_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Unexpected error", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_headers_validation_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Invalid response", + "range": true, + "refId": "C" + } + ], + "title": "Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of connected peers and in-progress requests for headers.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 253 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + 
}, + "pluginVersion": "11.5.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_headers_in_flight_requests{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "In flight requests", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_connected_peers{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Connected peers", + "range": true, + "refId": "B" + } + ], + "title": "Requests", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 261 + }, + "id": 32, + "panels": [], + "repeat": "instance", + "title": "Downloader: Bodies", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "D" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 262 + }, + "id": 30, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_total_downloaded{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Downloaded", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_total_flushed{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Flushed", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_total_flushed{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Flushed/s", + "range": true, + "refId": "C" + }, + { + "datasource": 
{ + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_total_downloaded{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Downloaded/s", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_responses{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Buffered responses", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Buffered blocks", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_queued_blocks{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Queued blocks", + "range": true, + "refId": "G" + } + ], + "title": "I/O", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Internal errors in the bodies downloader. These are expected to happen from time to time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 262 + }, + "id": 28, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_timeout_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "Request timed out", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_unexpected_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Unexpected error", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_downloaders_bodies_validation_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Invalid response", + "range": true, + "refId": "C" + } + ], + "title": "Errors", + "type": "timeseries" + }, + 
{ + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of connected peers and in-progress requests for bodies.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 270 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_in_flight_requests{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "In flight requests", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_connected_peers{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Connected peers", + "range": true, + "refId": "B" + } + ], + "title": "Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of blocks and size in bytes of those blocks", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "blocks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 270 + }, + "id": 73, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Buffered blocks size ", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_buffered_blocks{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Buffered blocks", + "range": true, + "refId": "B" + } + ], + "title": "Downloader buffer", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of blocks in a request and size in bytes of those block responses", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "blocks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 278 + }, + "id": 102, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_downloaders_bodies_response_response_size_bytes{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Response size", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_length{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Individual response length (number of bodies in response)", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_downloaders_bodies_response_response_size_bytes / reth_downloaders_bodies_response_response_length{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Mean body size in response", + "range": true, + "refId": "C" + } + ], + "title": "Block body response sizes", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 286 + }, + "id": 226, + "panels": [], + "title": "Eth Requests", + "type": "row" + }, + { + "datasource": { + "type": 
"prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 287 + }, + "id": 225, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_headers_requests_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Headers Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Headers Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 287 + }, + "id": 227, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + 
"placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_receipts_requests_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Receipts Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Receipts Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 295 + }, + "id": 235, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_bodies_requests_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Bodies Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Bodies Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + 
"stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 295 + }, + "id": 234, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_node_data_requests_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Node Data Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Node Data Requests Received", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 303 + }, + "id": 68, + "panels": [], + "repeat": "instance", + "title": "Payload Builder", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Number of active jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 304 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_payloads_active_jobs{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Active Jobs", + "range": true, + "refId": "A" + } + ], + "title": "Active Jobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Total number of initiated jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": 
false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 304 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_payloads_initiated_jobs{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Initiated Jobs", + "range": true, + "refId": "A" + } + ], + "title": "Initiated Jobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Total number of failed jobs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 312 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_payloads_failed_jobs{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Failed Jobs", + "range": true, + "refId": "A" + } + ], + "title": "Failed Jobs", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 320 + }, + "id": 105, + "panels": [], + "title": "Pruning", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": 
"line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 321 + }, + "id": 106, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_pruner_duration_seconds_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) / rate(reth_pruner_duration_seconds_count{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Pruner duration, total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 321 + }, + "id": 107, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "rate(reth_pruner_segments_duration_seconds_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) / rate(reth_pruner_segments_duration_seconds_count{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "instant": false, + "legendFormat": "{{segment}}", + "range": true, + "refId": "A" + } + ], + "title": "Pruner duration, per segment", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, 
+ "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 329 + }, + "id": 217, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Archive and full nodes prune headers, transactions and receipts in MDBX (hot db) after they have been written to static files (cold db). Full nodes additionally prune history indices.", + "editorMode": "code", + "expr": "reth_pruner_segments_highest_pruned_block{service=\"$service\", namespace=\"$env\"}", + "instant": false, + "legendFormat": "{{segment}}", + "range": true, + "refId": "A" + } + ], + "title": "Highest pruned block, per segment", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 337 + }, + "id": 97, + "panels": [], + "title": "Process", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Retained" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 338 + }, + "id": 98, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_active{service=\"$service\", namespace=\"$env\"}", + "instant": false, + "legendFormat": "Active", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": 
"reth_jemalloc_allocated{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Allocated", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_mapped{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Mapped", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_metadata{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Metadata", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_resident{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Resident", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_jemalloc_retained{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Retained", + "range": true, + "refId": "F" + } + ], + "title": "Jemalloc Memory", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 338 + }, + "id": 101, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_process_resident_memory_bytes{service=\"$service\", namespace=\"$env\"}", + "instant": false, + "legendFormat": "Resident", + "range": true, + "refId": "A" + } + ], + "title": "Memory", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "100% = 1 core", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 346 + }, + "id": 99, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "avg(rate(reth_process_cpu_seconds_total{service=\"$service\", namespace=\"$env\"}[1m]))", + "instant": false, + "legendFormat": "Process", + "range": true, + "refId": "A" + } + ], + "title": "CPU", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 346 + }, + "id": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_process_open_fds{service=\"$service\", namespace=\"$env\"}", + "instant": false, + "legendFormat": "Open", + "range": true, + "refId": "A" + } + ], + "title": "File Descriptors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the number of critical tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 
false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "semi-dark-red", + "value": 0 + } + ] + }, + "unit": "tasks" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 354 + }, + "id": 248, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_critical_tasks_total{service=\"$service\", namespace=\"$env\"}- reth_executor_spawn_finished_critical_tasks_total{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor critical tasks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the number of regular tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 354 + }, + "id": 247, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_tasks_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_tasks_total{service=\"$service\", namespace=\"$env\"} - reth_executor_spawn_finished_regular_tasks_total{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular 
tasks", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 362 + }, + "id": 236, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The total number of canonical state notifications sent to ExExes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 370 + }, + "id": 237, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_exex_notifications_sent_total{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Total Notifications Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Notifications Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The total number of events ExExes have sent to the manager.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 370 + }, + "id": 238, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_exex_events_sent_total{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Total Events Sent", + "range": true, + "refId": "B" + } + ], + "title": "Total Events Sent", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": 
"Current and Maximum capacity of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 378 + }, + "id": 239, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_current_capacity{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Current size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "max_over_time(reth_exex_manager_max_capacity{service=\"$service\", namespace=\"$env\"}[1h])", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "C" + } + ], + "title": "Current and Max Capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Current size of the internal state notifications buffer.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 378 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_buffer_size{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Max size", + "range": true, + "refId": "B" + } + ], + "title": "Buffer Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"o59qe-zVz" + }, + "description": "Total number of ExExes installed in the node", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 386 + }, + "id": 220, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_exex_manager_num_exexs{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Number of ExExs", + "range": true, + "refId": "A" + } + ], + "title": "Number of ExExes", + "type": "stat" + } + ], + "title": "Execution Extensions", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 363 + }, + "id": 241, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 371 + }, + "id": 243, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_exex_wal_lowest_committed_block_height{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Lowest Block", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_exex_wal_highest_committed_block_height{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Highest Block", + "range": true, + "refId": "C" + } + ], + "title": "Current Committed Block Heights", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 371 + }, + "id": 244, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_exex_wal_committed_blocks_count{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Committed Blocks", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_exex_wal_notifications_count{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "Notifications", + "range": true, + "refId": "B" + } + ], + "title": "Number of entities", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 379 + }, + "id": 245, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_exex_wal_size_bytes{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "C" + } + ], + "title": "Total size of all notifications", + "type": "timeseries" + } + ], + "title": "Execution Extensions Write-Ahead Log", + "type": "row" + } + ], + "refresh": "5s", + "schemaVersion": 41, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "mainnet", + "value": "mainnet" + }, + "definition": "label_values(env)", + 
"name": "env", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(sepolia|mainnet)-eks.*", + "type": "query" + }, + { + "current": { + "text": "l1reth-el-0", + "value": "l1reth-el-0" + }, + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "name": "service", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(l[1|2]reth.*)", + "type": "query" + } + ] + }, + "time": { + "from": "now-12h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth", + "uid": "celrs7xmuowe8a", + "version": 3, + "weekStart": "" +} \ No newline at end of file
diff --git reth/etc/grafana/scroll/reth-discovery.json scroll-reth/etc/grafana/scroll/reth-discovery.json new file mode 100644 index 0000000000000000000000000000000000000000..efe61913cf2f8a31edd870cacd87c86faf848795 --- /dev/null +++ scroll-reth/etc/grafana/scroll/reth-discovery.json @@ -0,0 +1,1206 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.3.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Devp2p peer discovery protocols", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": 
"reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": 
true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "light-purple", + "value": null + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.3.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Discv5", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Peers managed by underlying sigp/discv5 node. \n\nOnly peers in the kbuckets are queried in FINDNODE lookups, and included in NODES responses to other peers.\n\nNot all peers with an established session will make it into the kbuckets, due to e.g. reachability issues (NAT) and capacity of kbuckets furthest log2distance away from local node (XOR metrics).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Total peers kbuckets" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#9b73d6", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total connected sessions" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "super-light-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 198, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"reth_discv5_kbucket_peers_raw_total{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total peers kbuckets", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_discv5_sessions_raw_total{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total connected sessions", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Frequency of session establishment and kbuckets insertions.\n\nSince discv5 favours long-lived connections, kbuckets insertions are expected to be less frequent the longer the node stays online.\n\nSome incoming connections may be from peers with unreachable ENRs, ENRs that don't advertise a UDP socket. These peers are not useful for the discv5 node, nor for RLPx.\n\nDiscovered peers are filtered w.r.t. what they advertise in their ENR. By default peers advertising 'eth2' are filtered out. Unreachable ENRs are also filtered out. Only peers that pass the filter are useful. These peers get passed up the node, to attempt an RLPx connection.\n\nSessions will succeed to peers that advertise no UDP socket in their ENR. This allows peers to discover their reachable socket. On the other hand, for DoS protection, peers that advertise a different socket than the socket from which they make the connection, are denied a sigp/discv5 session. These peers have an unverifiable ENR. The peers are passed to RLPx nonetheless (some EL implementations of discv5 are more lax about ENR and source socket matching). 
", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + "unitScale": true + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Total Session Establishments" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total KBucket Insertions" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#9958f4", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Session Establishments (pass filter)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ada", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Session Establishments (unreachable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Session Establishment Failed (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Failed Session Establishments (unverifiable ENR)" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8ab8ff", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_inserted_kbucket_peers_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total KBucket Insertions", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_established_sessions_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total Session Establishments", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"rate(reth_discv5_established_sessions_unreachable_enr_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (unreachable ENR)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_established_sessions_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) - rate(reth_discv5_established_sessions_custom_filtered_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Session Establishments (pass filter)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_discv5_unverifiable_enrs_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Session Establishments (unverifiable ENR)", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Peer Churn", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Frequency of discovering peers from some popular network stacks.\n\nSome nodes miss advertising a fork ID kv-pair in their ENR. They will be counted as 'unknown', but may belong to a popular network.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps", + "unitScale": true + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Eth" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#b677d9", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Eth2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Unknown" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#ff0ae5", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "OP EL" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "#8AB8FF", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": 
true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_eth2{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eth2", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opel{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP EL", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(reth_discv5_opstack{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "OP CL", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "(rate(reth_discv5_established_sessions_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) + rate(reth_discv5_unverifiable_enrs_raw_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])) - (rate(reth_discv5_eth{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) + rate(reth_discv5_eth2{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) + rate(reth_discv5_opstack{service=\"$service\", namespace=\"$env\"}[$__rate_interval]) + rate(reth_discv5_opel{service=\"$service\", namespace=\"$env\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Unknown", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Advertised Network Stacks", + "type": "timeseries" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "mainnet", + "value": "mainnet" + }, + "definition": "label_values(env)", + "name": "env", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(sepolia|mainnet)-eks.*", + "type": "query" + }, + { + "current": { + "text": "l1reth-el-0", + "value": "l1reth-el-0" + }, + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "name": "service", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(l[1|2]reth.*)", + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + 
"timezone": "", + "title": "Reth - Peer Discovery", + "uid": "belrsq6vhwetcb", + "version": 1, + "weekStart": "" +} \ No newline at end of file
diff --git reth/etc/grafana/scroll/reth-mempool.json scroll-reth/etc/grafana/scroll/reth-mempool.json new file mode 100644 index 0000000000000000000000000000000000000000..3aeb099035a99fb78635fd19a9b0e59472a69fec --- /dev/null +++ scroll-reth/etc/grafana/scroll/reth-mempool.json @@ -0,0 +1,3991 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "11.2.0" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Metrics for transaction P2P gossip and the local view of mempool data", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 96, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 1 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 1 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 1 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": 
"reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 89, + "panels": [], + "repeat": "instance", + "repeatDirection": "h", + "title": "Transaction Pool", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the entries, byte size, failed inserts and file deletes of the blob store", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 115, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_entries{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Entries", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_byte_size{service=\"$service\", namespace=\"$env\"}", + 
"fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Bytesize", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_inserts{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Inserts", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blobstore_failed_deletes{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed Deletes", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Blob store", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks a heuristic of the memory footprint of the various transaction pool sub-pools", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 210, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_basefee_pool_size_bytes{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Base Fee Pool Size", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_pending_pool_size_bytes{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Pending Pool Size", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_queued_pool_size_bytes{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Queued Pool Size", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_pool_size_bytes{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Blob Pool Size", + "range": true, + 
"refId": "D" + } + ], + "title": "Subpool Sizes in Bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Transaction pool maintenance metrics", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_dirty_accounts{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Dirty Accounts", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_drift_count{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Drift Count", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_reinserted_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Reinserted Transactions", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_deleted_tracked_finalized_blobs{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Deleted Tracked Finalized Blobs", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "TxPool Maintenance", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the number of transactions in the various transaction pool sub-pools", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + 
"barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 92, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_basefee_pool_transactions{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Base Fee Pool Transactions", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_pending_pool_transactions{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Pending Pool Transactions", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_queued_pool_transactions{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Queued Pool Transactions", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_pool_transactions{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Blob Pool Transactions", + "range": true, + "refId": "D" + } + ], + "title": "Subpool Transaction Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the number of transactions per second that are inserted and removed from the transaction pool, as well as the number of invalid transactions per second.\n\nBad transactions are a subset of invalid transactions, these will never be successfully imported. 
The remaining invalid transactions have a chance of being imported, for example transactions with nonce gaps.\n\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": true, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 93, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_transaction_pool_inserted_transactions{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Inserted Transactions", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_transaction_pool_removed_transactions{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Removed Transactions", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_transaction_pool_invalid_transactions{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Invalid Transactions", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_bad_imports{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Bad Transactions", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "Inserted Transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Number of transactions about to be imported into the pool.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", 
+ "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 94, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_pending_pool_imports{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Transactions Pending Import", + "range": true, + "refId": "C" + } + ], + "title": "Pending Pool Imports", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Tracks the number of incoming transaction messages in the channel from the network to the transaction pool.\n\nMempool messages sent over this channel are `GetPooledTransactions` requests, `NewPooledTransactionHashes` announcements (gossip), and `Transactions` (gossip)\n\nTx - `NetworkManager`\n\\nRx - `TransactionsManager`", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": true, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "mps" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "B" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + }, + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "events" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 95, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_network_pool_transactions_messages_sent_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "Tx", + "range": true, + "refId": "A" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_network_pool_transactions_messages_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Rx", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_network_pool_transactions_messages_sent_total{service=\"$service\", namespace=\"$env\"} - reth_network_pool_transactions_messages_received_total{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Messages in Channel", + "range": true, + "refId": "C" + } + ], + "title": "Incoming Gossip and Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Measures the message send rate (MPS) for queued outgoing messages. Outgoing messages are added to the queue before being sent to other peers, and this metric helps track the rate of message dispatch.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "mps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_queued_outgoing_messages{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Queued Messages per Second", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Queued Outgoing Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "All Transactions metrics", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + 
"mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 116, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_hash{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by hash", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_id{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by id", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_all_transactions_by_all_senders{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "All transactions by all senders", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_transactions_nonce_gaps{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Blob transactions nonce gaps", + "range": true, + "refId": "D", + "useBackend": false + } + ], + "title": "All Transactions metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Egress RLPx capability traffic (default only `eth` capability)\n\nDropped - session channels are bounded, if there's no capacity, the message will be dropped.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "mps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 217, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_total_outgoing_peer_messages_dropped{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Dropped", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Outgoing Capability Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Total number of times a transaction is sent/announced that is already in the local pool.\n\nThis reflects the redundancy in the mempool.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_hashes_already_in_pool{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Announced Transactions Already in Pool", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_transactions_already_in_pool{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Received Transactions Already in Pool ", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Frequency of Transactions Already in Pool", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Currently active outgoing GetPooledTransactions requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": 
"line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 104, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_inflight_transaction_requests{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Inflight Transaction Requests", + "range": true, + "refId": "C" + } + ], + "title": "Inflight Transaction Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Duration of one call to poll `TransactionsManager` future, and its nested function calls.\n\nNetwork Events - stream peer session updates from `NetworkManager`;\nTransaction Events - stream txns gossip from `NetworkManager`;\nPending Transactions - stream hashes of txns successfully inserted into pending set in `TransactionPool`;\nPending Pool Imports - flush txns to pool from `TransactionsManager`;\nFetch Events - stream fetch txn events (success case wraps a tx) from `TransactionFetcher`;\nFetch Pending Hashes - search for hashes announced by an idle peer in cache for hashes pending fetch;\n(Transactions Commands - stream commands from testnet to fetch/serve/propagate txns)\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 200, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_network_events{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + 
"legendFormat": "Network Events", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_transaction_events{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Transaction Events", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_network_acc_duration_poll_imported_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Pending Transactions", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_pending_pool_imports{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Pending Pool Imports", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_fetch_events{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Fetch Events", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_commands{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Commands", + "range": true, + "refId": "G", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_fetch_pending_hashes{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Fetch Pending Hashes", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_tx_manager{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total Transactions Manager Future", + "range": true, + "refId": "H", + "useBackend": false + } + ], + "title": "Transactions Manager Poll Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 199, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_network_hashes_pending_fetch{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Hashes in Pending Fetch Cache", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_network_inflight_transaction_requests{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Hashes in Inflight Requests", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_network_hashes_inflight_transaction_requests{service=\"$service\", namespace=\"$env\"}) + sum(reth_network_hashes_pending_fetch{service=\"$service\", namespace=\"$env\"})", + "hide": false, + "instant": false, + "legendFormat": "Total Hashes in Transaction Fetcher", + "range": true, + "refId": "C" + } + ], + "title": "Transaction Fetcher Hashes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Durations of one call to poll `NetworkManager` future, and its nested function calls.\n\nNetwork Handle Message - stream network handle messages from `TransactionsManager`;\nSwarm Events - stream transaction gossip from `Swarm`", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 61 + }, + "id": 209, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_network_handle{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Network Handle Messages", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_acc_duration_poll_swarm{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Swarm Events", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_poll_network_manager{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total Network Manager Future", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Network Manager Poll Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Frequency of a peer sending a transaction that has already been marked as seen by that peer. This could for example be the case if a transaction is sent/announced to the peer at the same time that the peer sends/announces the same transaction to us.\n\nThis reflects the latency in the mempool.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 61 + }, + "id": 208, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_hash_already_seen_by_peer{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Announced Transactions Already Seen by Peer", + "range": true, + "refId": "A", + "useBackend": false + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_occurrences_of_transaction_already_seen_by_peer{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Freq Received Transactions Already Seen by Peer", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Frequency of Transactions Already Marked as Seen by Peer", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Number of all transactions of all sub-pools by type", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 69 + }, + "id": 218, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_legacy_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Legacy", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip2930_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-2930", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip1559_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-1559", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip4844_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-4844", + "range": true, + "refId": "D", + 
"useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip7702_transactions{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-7702", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Transactions by Type in Pool", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Duration of one call to `TransactionFetcher::on_fetch_pending_hashes`.\n\nFind Peer - find an idle fallback peer for a hash pending fetch.\n\nFill Request - fill `GetPooledTransactions` request, for the found peer, with more hashes from cache of hashes pending fetch. ", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 69 + }, + "id": 215, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_find_idle_fallback_peer_for_any_pending_hash{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Find Idle Peer", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_network_duration_fill_request_from_hashes_pending_fetch{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Fill Request", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Fetch Hashes Pending Fetch Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Frequency of transaction types seen in announcements", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + 
"viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 77 + }, + "id": 214, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_legacy_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Legacy", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip2930_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip2930", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip1559_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip1559", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip4844_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip4844", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_transaction_fetcher_eip7702_sum{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Eip7702", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Announced Transactions by Type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Number of transactions evicted in each pool", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + 
"tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 77 + }, + "id": 220, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_pending_transactions_evicted{service=\"$service\", namespace=\"$env\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "PendingPool", + "range": true, + "refId": "A", + "useBackend": false, + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + } + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_transaction_pool_basefee_transactions_evicted{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "BasefeePool", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_transaction_pool_blob_transactions_evicted{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "BlobPool", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "reth_transaction_pool_queued_transactions_evicted{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "instant": false, + "legendFormat": "QueuedPool", + "range": true, + "refId": "D" + } + ], + "title": "Evicted Transactions", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 85 + }, + "id": 6, + "panels": [], + "repeat": "instance", + "title": "Networking", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of tracked peers in the discovery modules (dnsdisc and discv4)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 86 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": 
"list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_tracked_peers{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Tracked Peers", + "range": true, + "refId": "A" + } + ], + "title": "Discovery: Tracked peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "The number of incoming and outgoing connections, as well as the number of peers we are currently connected to. Outgoing and incoming connections also count peers we are trying to connect to.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 86 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_pending_outgoing_connections{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Pending Outgoing Connections", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_outgoing_connections{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Outgoing Connections", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_total_pending_connections{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Total Pending Connections", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_incoming_connections{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Incoming Connections", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_connected_peers{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "Connected Peers", + "range": true, + "refId": "E" + } + ], + "title": "Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Internal errors in the P2P module. These are expected to happen from time to time. 
High error rates should not cause alarm if the node is peering otherwise.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "red", + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "cps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 86 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_p2pstream_disconnected_errors{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "legendFormat": "P2P Stream Disconnected", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_network_pending_session_failures{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Failed Pending Sessions", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "rate(reth_network_invalid_messages_received_total{service=\"$service\", namespace=\"$env\"}[$__rate_interval])", + "hide": false, + "legendFormat": "Invalid Messages", + "range": true, + "refId": "C" + } + ], + "title": "P2P Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 94 + }, + "id": 54, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_useless_peer{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "UselessPeer", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_subprotocol_specific{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "SubprotocolSpecific", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + 
"editorMode": "builder", + "expr": "reth_network_already_connected{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "AlreadyConnected", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_client_quitting{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "ClientQuitting", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_unexpected_identity{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "UnexpectedHandshakeIdentity", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_disconnect_requested{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "DisconnectRequested", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_null_node_identity{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "NullNodeIdentity", + "range": true, + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_tcp_subsystem_error{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "TCPSubsystemError", + "range": true, + "refId": "H" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_incompatible{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "IncompatibleP2PVersion", + "range": true, + "refId": "I" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_protocol_breach{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "ProtocolBreach", + "range": true, + "refId": "J" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_too_many_peers{service=\"$service\", namespace=\"$env\"}", + "hide": false, + "legendFormat": "TooManyPeers", + "range": true, + "refId": "K" + } + ], + "title": "Peer Disconnect Reasons", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "description": "Number of successful outgoing dial attempts.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, 
+ "w": 14, + "x": 8, + "y": 94 + }, + "id": 103, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "expr": "reth_network_total_dial_successes{service=\"$service\", namespace=\"$env\"}", + "legendFormat": "Total Dial Successes", + "range": true, + "refId": "A" + } + ], + "title": "Total Dial Success", + "type": "timeseries" + } + ], + "refresh": "30s", + "revision": 1, + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "mainnet", + "value": "mainnet" + }, + "definition": "label_values(env)", + "name": "env", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(sepolia|mainnet)-eks.*", + "type": "query" + }, + { + "current": { + "text": "l1reth-el-0", + "value": "l1reth-el-0" + }, + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "name": "service", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(l[1|2]reth.*)", + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - Transaction Pool", + "uid": "delrsml3waqdce", + "version": 2, + "weekStart": "" +} \ No newline at end of file
diff --git reth/etc/grafana/scroll/reth-state-growth.json scroll-reth/etc/grafana/scroll/reth-state-growth.json new file mode 100644 index 0000000000000000000000000000000000000000..6c7b39c43b40827379ceee903d15c08541a01f96 --- /dev/null +++ scroll-reth/etc/grafana/scroll/reth-state-growth.json @@ -0,0 +1,1769 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Ethereum state growth", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 3, + "y": 0 + }, + "id": 192, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_timestamp}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Timestamp", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": 
"prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 193, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{git_sha}}", + "range": false, + "refId": "A" + } + ], + "title": "Git SHA", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 12, + "y": 0 + }, + "id": 195, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{build_profile}}", + "range": false, + "refId": "A" + } + ], + "title": "Build Profile", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 14, + "y": 0 + }, + "id": 196, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{target_triple}}", + "range": false, + "refId": "A" + } + ], + "title": "Target Triple", + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 
3, + "w": 5, + "x": 19, + "y": 0 + }, + "id": 197, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "reth_info{service=\"$service\", namespace=\"$env\"}", + "instant": true, + "legendFormat": "{{cargo_features}}", + "range": false, + "refId": "A" + } + ], + "title": "Cargo Features", + "transparent": true, + "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 7, + "panels": [], + "title": "State", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainStorageState\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainAccountState\"}[$interval])) + avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainStorageState\"}[$interval])) + 
avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Bytecodes\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainAccountState\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Account", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainStorageState\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Storage", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Bytecodes", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=~\"PlainAccountState|PlainStorageState|Bytecodes\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Total", + "range": true, + "refId": "D" + } + ], + "title": "State Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, 
+ "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainAccountState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Account State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 14 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"PlainStorageState\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Storage State Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Bytecodes\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytecodes Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 8, + "panels": [], + "title": "History", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"receipts\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"transactions\"}[$interval]))", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Growth (interval = ${interval})", + "transformations": [ + { + 
"id": "calculateField", + "options": { + "binary": { + "left": "Headers", + "reducer": "sum", + "right": "Receipts" + }, + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Headers\"}) + sum(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"headers\"})", + "instant": false, + "interval": "$interval", + "legendFormat": "Headers", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Receipts\"}) + sum(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"receipts\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Receipts", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "sum(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Transactions\"}) + sum(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"transactions\"})", + "hide": false, + "instant": false, + "interval": "$interval", + "legendFormat": "Transactions", + "range": true, + "refId": "C" + } + ], + "title": "History Size", + "transformations": [ + { + "id": "calculateField", + "options": { + "mode": "reduceRow", + "reduce": { + "include": [ + "Headers", + "Receipts", + "Transactions" + ], + "reducer": "sum" + } + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Headers\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"headers\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Headers Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Receipts\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"receipts\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Receipts Growth (interval = ${interval})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": 
"auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 55 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "o59qe-zVz" + }, + "editorMode": "code", + "expr": "avg(delta(reth_db_table_size{service=\"$service\", namespace=\"$env\", table=\"Transactions\"}[$interval])) + avg(delta(reth_static_files_segment_size{service=\"$service\", namespace=\"$env\", segment=\"transactions\"}[$interval]))", + "instant": false, + "interval": "$interval", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions Growth (interval = ${interval})", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "mainnet", + "value": "mainnet" + }, + "definition": "label_values(env)", + "name": "env", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(env)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(sepolia|mainnet)-eks.*", + "type": "query" + }, + { + "current": { + "text": "l1reth-el-0", + "value": "l1reth-el-0" + }, + "definition": "label_values(reth_info{namespace=\"$env\"},service)", + "name": "service", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(reth_info{namespace=\"$env\"},service)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "(l[1|2]reth.*)", + "type": "query" + }, + { + "current": { + "selected": true, + "text": "10m", + "value": "10m" + }, + "hide": 0, + "includeAll": false, + "label": "Interval", + "multi": false, + "name": "interval", + "options": [ + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "5m,10m,30m,1h,6h,12h,1d,7d,14d,30d", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Reth - State & History", + "uid": "delrsnxu0zrwgd", + "version": 1, + "weekStart": "" +} \ No newline at end of file
diff --git reth/examples/custom-beacon-withdrawals/src/main.rs scroll-reth/examples/custom-beacon-withdrawals/src/main.rs index a72b2c44487cbacd6a7161e95cd2b96e0001a70b..1d93226dd6a90ba1e5e786487c34c9ff20950f1d 100644 --- reth/examples/custom-beacon-withdrawals/src/main.rs +++ scroll-reth/examples/custom-beacon-withdrawals/src/main.rs @@ -8,7 +8,7 @@ use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx}, eth::{EthBlockExecutionCtx, EthBlockExecutor}, precompiles::PrecompilesMap, - revm::context::result::ResultAndState, + revm::context::{result::ResultAndState, Block as _}, EthEvm, EthEvmFactory, }; use alloy_sol_macro::sol; @@ -271,7 +271,7 @@ };   // Clean-up post system tx context state.remove(&SYSTEM_ADDRESS); - state.remove(&evm.block().beneficiary); + state.remove(&evm.block().beneficiary());   evm.db_mut().commit(state);
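The hunk above swaps field access (`evm.block().beneficiary`) for the `Block` trait's accessor method (`beneficiary()`), which is why it also adds `revm::context::Block as _` to the imports: the trait has to be in scope for its methods to resolve, even though its name is never used again. A self-contained sketch of that Rust pattern, with a hypothetical trait and type:

```rust
// Hypothetical trait/type, purely to illustrate the `use Trait as _` pattern
// from the hunk above: an anonymous import brings the trait's methods into
// scope without adding the trait name to the namespace.
mod env {
    pub struct BlockInfo {
        pub(crate) coinbase: [u8; 20],
    }

    pub trait Block {
        fn beneficiary(&self) -> [u8; 20];
    }

    impl Block for BlockInfo {
        fn beneficiary(&self) -> [u8; 20] {
            self.coinbase
        }
    }
}

// Anonymous import: only the trait's methods are wanted, not its name.
use env::Block as _;

fn main() {
    let block = env::BlockInfo { coinbase: [0u8; 20] };
    // Resolves through the trait method rather than a public field.
    let _beneficiary = block.beneficiary();
}
```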
diff --git reth/examples/custom-dev-node/src/main.rs scroll-reth/examples/custom-dev-node/src/main.rs index f700cf9e89a6c881c1f351a0c065754a31ae678e..c5441a2b38862bdf220f9e732e03fd0f6d2bec21 100644 --- reth/examples/custom-dev-node/src/main.rs +++ scroll-reth/examples/custom-dev-node/src/main.rs @@ -33,7 +33,7 @@ let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) .testing_node(tasks.executor()) .node(EthereumNode::default()) - .launch() + .launch_with_debug_capabilities() .await?;   let mut notifications = node.provider.canonical_state_stream();
diff --git reth/examples/custom-evm/src/main.rs scroll-reth/examples/custom-evm/src/main.rs index b5e69670ec778d70c0fe5c923afad58c8ea8d51c..e32f0be6bd5b8e3a1ae7ac9ec02876a3e4b3b5f1 100644 --- reth/examples/custom-evm/src/main.rs +++ scroll-reth/examples/custom-evm/src/main.rs @@ -18,7 +18,7 @@ chainspec::{Chain, ChainSpec}, evm::{ primitives::{Database, EvmEnv}, revm::{ - context::{Context, TxEnv}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::{EVMError, HaltReason}, inspector::{Inspector, NoOpInspector}, interpreter::interpreter::EthInterpreter, @@ -54,6 +54,7 @@ type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>; type HaltReason = HaltReason; type Context<DB: Database> = EthEvmContext<DB>; type Spec = SpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap;   fn create_evm<DB: Database>(&self, db: DB, input: EvmEnv) -> Self::Evm<DB, NoOpInspector> {
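This hunk, and the matching one in `custom-node/src/evm/alloy.rs` further below, add a `type BlockEnv = BlockEnv;` line to existing EVM factory impls. That is the usual fallout when an upstream trait gains a new associated type: every implementor must name a concrete type for it. A minimal sketch of the situation with hypothetical traits and types (not the real `EvmFactory` definitions):

```rust
// Hypothetical stand-ins illustrating why downstream impls gain a
// `type BlockEnv = ...;` line once the upstream trait requires one.
struct BlockEnv;
struct MyEvmFactory;

trait EvmFactoryLike {
    // Newly added associated type on the upstream trait.
    type BlockEnv;

    fn describe() -> &'static str;
}

impl EvmFactoryLike for MyEvmFactory {
    // The one-line addition each implementor now has to make.
    type BlockEnv = BlockEnv;

    fn describe() -> &'static str {
        "factory with an explicit block environment type"
    }
}

fn main() {
    let _env: <MyEvmFactory as EvmFactoryLike>::BlockEnv = BlockEnv;
    println!("{}", MyEvmFactory::describe());
}
```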
diff --git reth/examples/custom-node-components/src/main.rs scroll-reth/examples/custom-node-components/src/main.rs index b6b8fb3cdf2e7c499f0f097b9fbbd194e74be845..c4e571762e8709e037d66be350d3b5bb2e3fb878 100644 --- reth/examples/custom-node-components/src/main.rs +++ scroll-reth/examples/custom-node-components/src/main.rs @@ -95,6 +95,7 @@ ctx.task_executor().spawn_critical( "txpool maintenance task", reth_ethereum::pool::maintain::maintain_transaction_pool_future( client, + ctx.chain_spec(), pool, chain_events, ctx.task_executor().clone(),
diff --git reth/examples/custom-node/Cargo.toml scroll-reth/examples/custom-node/Cargo.toml index fe1f00062566a8b1e10017a5c0b8d7b4de061853..9ac414b71786d6677455a2c5dd6b8218b9fdced5 100644 --- reth/examples/custom-node/Cargo.toml +++ scroll-reth/examples/custom-node/Cargo.toml @@ -16,6 +16,7 @@ reth-optimism-flashblocks.workspace = true reth-db-api.workspace = true reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-engine-primitives.workspace = true reth-rpc-engine-api.workspace = true @@ -68,5 +69,6 @@ "alloy-rpc-types-engine/arbitrary", "reth-db-api/arbitrary", "alloy-rpc-types-eth/arbitrary", "op-alloy-rpc-types/arbitrary", + "reth-primitives-traits/arbitrary", ] default = []
diff --git reth/examples/custom-node/src/evm/alloy.rs scroll-reth/examples/custom-node/src/evm/alloy.rs index 6071a2c6dd8697810df7d72cd6a0d145159565ab..d8df842cfc50889fb3177f4920828db7f893c1d4 100644 --- reth/examples/custom-node/src/evm/alloy.rs +++ scroll-reth/examples/custom-node/src/evm/alloy.rs @@ -40,6 +40,7 @@ type Tx = CustomTxEnv; type Error = EVMError<DB::Error, OpTransactionError>; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I;   @@ -103,6 +104,7 @@ type Tx = CustomTxEnv; type Error<DBError: Error + Send + Sync + 'static> = EVMError<DBError, OpTransactionError>; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap;   fn create_evm<DB: Database>(
diff --git reth/examples/custom-node/src/evm/env.rs scroll-reth/examples/custom-node/src/evm/env.rs index 5508ec4e6d0f1004376389b500ff4264d0c4aff2..53a2b4e3f150a09080aab332366829d4373b51bb 100644 --- reth/examples/custom-node/src/evm/env.rs +++ scroll-reth/examples/custom-node/src/evm/env.rs @@ -1,6 +1,7 @@ use crate::primitives::{CustomTransaction, TxPayment}; use alloy_eips::{eip2930::AccessList, Typed2718}; use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv}; +use alloy_op_evm::block::OpTxEnv; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use op_alloy_consensus::OpTxEnvelope; use op_revm::OpTransaction; @@ -328,3 +329,12 @@ fn into_tx_env(self) -> Self { self } } + +impl OpTxEnv for CustomTxEnv { + fn encoded_bytes(&self) -> Option<&Bytes> { + match self { + Self::Op(tx) => tx.encoded_bytes(), + Self::Payment(_) => None, + } + } +}
diff --git reth/examples/custom-node/src/primitives/header.rs scroll-reth/examples/custom-node/src/primitives/header.rs index 946bad51894c6fe1478f1158d811501f6ddc4149..0e28cca3987d3cd2f0a89a3460fa9245f6d67d86 100644 --- reth/examples/custom-node/src/primitives/header.rs +++ scroll-reth/examples/custom-node/src/primitives/header.rs @@ -179,6 +179,12 @@ Ok(obj) } }   +impl reth_primitives_traits::block::header::BlockHeaderMut for CustomHeader { + fn extra_data_mut(&mut self) -> &mut Bytes { + &mut self.inner.extra_data + } +} + impl BlockHeader for CustomHeader {}   impl RlpBincode for CustomHeader {}
diff --git reth/examples/exex-subscription/src/main.rs scroll-reth/examples/exex-subscription/src/main.rs index eb7ffaaf75453a94c3c57a9359f6b4ad971331ae..2f0c38f3852d3a81b4317c3b1d453070076bd598 100644 --- reth/examples/exex-subscription/src/main.rs +++ scroll-reth/examples/exex-subscription/src/main.rs @@ -4,7 +4,6 @@ //! An ExEx example that installs a new RPC subscription endpoint that emits storage changes for a //! requested address. #[allow(dead_code)] use alloy_primitives::{Address, U256}; -use clap::Parser; use futures::TryStreamExt; use jsonrpsee::{ core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink, @@ -166,14 +165,8 @@ Ok(()) }   -#[derive(Parser, Debug)] -struct Args { - #[arg(long)] - enable_ext: bool, -} - fn main() -> eyre::Result<()> { - reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move { + reth_ethereum::cli::Cli::parse_args().run(|builder, _| async move { let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::<SubscriptionRequest>();   let rpc = StorageWatcherRpc::new(subscriptions_tx.clone());
diff --git reth/examples/node-custom-rpc/src/main.rs scroll-reth/examples/node-custom-rpc/src/main.rs index 3c7c9269f58077f6e4597b7561f4d13b875f8dda..2af789a989cd569deebf88e6c133c7ed9236da90 100644 --- reth/examples/node-custom-rpc/src/main.rs +++ scroll-reth/examples/node-custom-rpc/src/main.rs @@ -53,7 +53,7 @@ Ok(()) }) // launch the node with custom rpc - .launch() + .launch_with_debug_capabilities() .await?;   handle.wait_for_node_exit().await
diff --git reth/examples/precompile-cache/src/main.rs scroll-reth/examples/precompile-cache/src/main.rs index dcaa886d73628742f874cba3e57a283eb723f6f7..fe748db46362a7e5389a6622ed4af21eef9aac64 100644 --- reth/examples/precompile-cache/src/main.rs +++ scroll-reth/examples/precompile-cache/src/main.rs @@ -16,7 +16,7 @@ chainspec::{Chain, ChainSpec}, evm::{ primitives::{Database, EvmEnv}, revm::{ - context::{Context, TxEnv}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::{EVMError, HaltReason}, inspector::{Inspector, NoOpInspector}, interpreter::interpreter::EthInterpreter, @@ -69,6 +69,7 @@ type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>; type HaltReason = HaltReason; type Context<DB: Database> = EthEvmContext<DB>; type Spec = SpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap;   fn create_evm<DB: Database>(&self, db: DB, input: EvmEnv) -> Self::Evm<DB, NoOpInspector> { @@ -176,7 +177,7 @@ async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> { let evm_config = EthEvmConfig::new_with_evm_factory( ctx.chain_spec(), - MyEvmFactory { precompile_cache: self.precompile_cache.clone() }, + MyEvmFactory { precompile_cache: self.precompile_cache }, ); Ok(evm_config) }
diff --git reth/testing/ef-tests/src/cases/blockchain_test.rs scroll-reth/testing/ef-tests/src/cases/blockchain_test.rs index 0526efaa6efca0bc8875ec4760b5888bae535518..c06ac05a6d537b7caabd7758c10bced73b228c8d 100644 --- reth/testing/ef-tests/src/cases/blockchain_test.rs +++ scroll-reth/testing/ef-tests/src/cases/blockchain_test.rs @@ -268,7 +268,7 @@ let state_db = StateProviderDatabase(&state_provider); let executor = executor_provider.batch_executor(state_db);   let output = executor - .execute_with_state_closure_always(&(*block).clone(), |statedb: &State<_>| { + .execute_with_state_closure_always(&(*block).clone(), |statedb: &mut State<_>| { witness_record.record_executed_state(statedb); }) .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?;
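The ef-tests change is only a signature adjustment: the state-inspection closure now receives `&mut State<_>` instead of `&State<_>`. A toy sketch of the calling convention, assuming a simplified executor and `State` type (not the real reth types):

```rust
// Toy sketch only: `State` and `execute_with_state_closure` are hypothetical stand-ins.
struct State {
    touched_accounts: usize,
}

fn execute_with_state_closure<F>(mut f: F) -> State
where
    // The hook now takes an exclusive (mutable) borrow of the post-execution state.
    F: FnMut(&mut State),
{
    let mut state = State { touched_accounts: 0 };
    f(&mut state);
    state
}

fn main() {
    let state = execute_with_state_closure(|state: &mut State| {
        state.touched_accounts += 1;
    });
    assert_eq!(state.touched_accounts, 1);
}
```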
diff --git reth/testing/ef-tests/tests/tests.rs scroll-reth/testing/ef-tests/tests/tests.rs index 0961817e901c147bdf44ebf82ee30486d5f8272e..2728246901ac780143b2535a8b2205d668348414 100644 --- reth/testing/ef-tests/tests/tests.rs +++ scroll-reth/testing/ef-tests/tests/tests.rs @@ -93,7 +93,7 @@ let suite_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("ethereum-tests") .join("BlockchainTests");   - BlockchainTests::new(suite_path).run_only(&format!("{}", stringify!($dir))); + BlockchainTests::new(suite_path).run_only(stringify!($dir)); } }; }
diff --git reth/.github/assets/check_rv32imac.sh scroll-reth/.github/assets/check_rv32imac.sh index 9d9c421ca208c14d9f85466a5cb993eefc0a54b4..49bea6a900d96c540288e18633fbd6533ddbe672 100755 --- reth/.github/assets/check_rv32imac.sh +++ scroll-reth/.github/assets/check_rv32imac.sh @@ -36,6 +36,14 @@ reth-optimism-forks reth-optimism-consensus reth-optimism-primitives reth-optimism-evm + + ## scroll + reth-scroll-chainspec + scroll-alloy-consensus + scroll-alloy-evm + scroll-alloy-rpc-types + scroll-alloy-rpc-types-engine + )   # Array to hold the results
diff --git reth/.github/assets/check_wasm.sh scroll-reth/.github/assets/check_wasm.sh index 3c72a8d189ed7dc5903c1d3721c7f8fcb40db7a8..9f535a45f7574191dbc6491657245867b95d1743 100755 --- reth/.github/assets/check_wasm.sh +++ scroll-reth/.github/assets/check_wasm.sh @@ -80,6 +80,18 @@ reth-era-downloader # tokio reth-era-utils # tokio reth-tracing-otlp reth-node-ethstats + reth-scroll-cli # tokio + reth-scroll-node # tokio + reth-scroll # tokio + reth-scroll-state-commitment # tokio + reth-scroll-chainspec # tokio + reth-scroll-consensus # c-kzg + reth-scroll-evm # tokio + reth-scroll-rpc #tokio + reth-scroll-engine-primitives # proptest + reth-scroll-payload # c-kzg + reth-scroll-primitives # c-kzg + reth-scroll-txpool )   # Array to hold the results
diff --git reth/.github/assets/hive/build_simulators.sh scroll-reth/.github/assets/hive/build_simulators.sh index dab77772f8eb70bdd9998478f6114961e08fd7c2..709ecc51e01d4310286e26fe04f6d0bcc9bd3b29 100755 --- reth/.github/assets/hive/build_simulators.sh +++ scroll-reth/.github/assets/hive/build_simulators.sh @@ -11,7 +11,7 @@ ./hive -client reth # first builds and caches the client   # Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.1.0 -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.3.0 -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true &
diff --git reth/.github/assets/hive/expected_failures.yaml scroll-reth/.github/assets/hive/expected_failures.yaml index 6a580d9a110eaf2878e694a9efc13f81eb98d5f5..ae3817cfc3d4a2e6ad67f1afe2febef34642872f 100644 --- reth/.github/assets/hive/expected_failures.yaml +++ scroll-reth/.github/assets/hive/expected_failures.yaml @@ -41,18 +41,14 @@ - Blob Transaction Ordering, Multiple Clients (Cancun) (reth)   sync: []   -# https://github.com/ethereum/hive/issues/1277 -engine-auth: - - "JWT Authentication: No time drift, correct secret (Paris) (reth)" - - "JWT Authentication: Negative time drift, within limit, correct secret (Paris) (reth)" - - "JWT Authentication: Positive time drift, within limit, correct secret (Paris) (reth)" +engine-auth: []   # 7702 test - no fix: it’s too expensive to check whether the storage is empty on each creation # 6110 related tests - may start passing when fixtures improve # 7002 related tests - post-fork test, should fix for spec compliance but not # realistic on mainnet # 7251 related tests - modified contract, not necessarily practical on mainnet, -# 7594: https://github.com/paradigmxyz/reth/issues/18471 +# 7594: https://github.com/paradigmxyz/reth/issues/18975 # worth re-visiting when more of these related tests are passing eest/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth
diff --git reth/.github/workflows/bench.yml scroll-reth/.github/workflows/bench.yml index 0203a4654a019d767d7dbe0750afc5ac083da3ba..e917fe760513928bd2881cd1c9ff76dbfdab2ad5 100644 --- reth/.github/workflows/bench.yml +++ scroll-reth/.github/workflows/bench.yml @@ -5,7 +5,7 @@ pull_request: # TODO: Disabled temporarily for https://github.com/CodSpeedHQ/runner/issues/55 # merge_group: push: - branches: [main] + branches: [scroll]   env: CARGO_TERM_COLOR: always @@ -15,8 +15,7 @@ name: bench jobs: codspeed: - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 with: @@ -29,7 +28,7 @@ cache-on-failure: true - name: Install cargo-codspeed uses: taiki-e/install-action@v2 with: - tool: cargo-codspeed + tool: cargo-codspeed@4.0.5 - name: Build the benchmark target(s) run: ./.github/scripts/codspeed-build.sh - name: Run the benchmarks
diff --git reth/.github/workflows/book.yml scroll-reth/.github/workflows/book.yml index 389bd34c700c9338cab1faffc45ae285c768c6e4..deec064b63980c0ef121c50739520cd44eb6d2b3 100644 --- reth/.github/workflows/book.yml +++ scroll-reth/.github/workflows/book.yml @@ -4,16 +4,16 @@ name: book   on: push: - branches: [main] + branches: [main, scroll] pull_request: - branches: [main] + branches: [main, scroll] types: [opened, reopened, synchronize, closed] merge_group:   jobs: build: runs-on: ubuntu-latest - timeout-minutes: 60 + timeout-minutes: 90 steps: - name: Checkout uses: actions/checkout@v5
diff --git reth/.github/workflows/compact.yml scroll-reth/.github/workflows/compact.yml index 8a18df872d28e605f38e4185a4defe8db5bab7b6..9289b10b1f3eab65a9abd609c9f74bec0e5397da 100644 --- reth/.github/workflows/compact.yml +++ scroll-reth/.github/workflows/compact.yml @@ -9,7 +9,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [scroll]   env: CARGO_TERM_COLOR: always @@ -17,13 +17,13 @@ name: compact-codec jobs: compact-codec: - runs-on: - group: Reth + runs-on: ubuntu-latest strategy: matrix: bin: - cargo run --bin reth --features "dev" - cargo run --bin op-reth --features "dev" --manifest-path crates/optimism/bin/Cargo.toml + - cargo run --bin scroll-reth --features "dev" --manifest-path crates/scroll/bin/scroll-reth/Cargo.toml steps: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable @@ -33,8 +33,8 @@ cache-on-failure: true - name: Checkout base uses: actions/checkout@v5 with: - ref: ${{ github.base_ref || 'main' }} - # On `main` branch, generates test vectors and serializes them to disk using `Compact`. + ref: ${{ github.base_ref || 'scroll' }} + # On `scroll` branch, generates test vectors and serializes them to disk using `Compact`. - name: Generate compact vectors run: | ${{ matrix.bin }} -- test-vectors compact --write
diff --git reth/.github/workflows/deny.yml scroll-reth/.github/workflows/deny.yml new file mode 100644 index 0000000000000000000000000000000000000000..6908a3d5a56117cac1a581d25953fa4670ba45cd --- /dev/null +++ scroll-reth/.github/workflows/deny.yml @@ -0,0 +1,27 @@ +# Runs `cargo-deny` when modifying `Cargo.lock`. + +name: deny + +on: + push: + branches: [main, scroll] + paths: [Cargo.lock] + pull_request: + branches: [main, scroll] + paths: [Cargo.lock] + merge_group: + +env: + CARGO_TERM_COLOR: always + +concurrency: deny-${{ github.head_ref || github.run_id }} + +jobs: + deny: + name: deny + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: EmbarkStudios/cargo-deny-action@v2 + with: + command: check all
diff --git reth/.github/workflows/e2e.yml scroll-reth/.github/workflows/e2e.yml index 16c9fb2f6137a70e8e2ab281058c4a875db05a12..ccaf2326617902c42a39eafdce485d6fc3cb98dc 100644 --- reth/.github/workflows/e2e.yml +++ scroll-reth/.github/workflows/e2e.yml @@ -19,10 +19,10 @@ jobs: test: name: e2e-testsuite - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 + RUST_MIN_STACK: 4194304 timeout-minutes: 90 steps: - uses: actions/checkout@v5
diff --git reth/.github/workflows/hive.yml scroll-reth/.github/workflows/hive.yml index 5263eb76deb1a0003ac043132b4b2a020a39888c..8f9d8461f598965a48f0485a3ce683e6329c9963 100644 --- reth/.github/workflows/hive.yml +++ scroll-reth/.github/workflows/hive.yml @@ -24,8 +24,7 @@ prepare-hive: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Checkout hive tests @@ -34,14 +33,39 @@ with: repository: ethereum/hive path: hivetests   + - name: Get hive commit hash + id: hive-commit + run: echo "hash=$(cd hivetests && git rev-parse HEAD)" >> $GITHUB_OUTPUT + - uses: actions/setup-go@v6 with: go-version: "^1.13.1" - run: go version   + - name: Restore hive assets cache + id: cache-hive + uses: actions/cache@v4 + with: + path: ./hive_assets + key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} + - name: Build hive assets + if: steps.cache-hive.outputs.cache-hit != 'true' run: .github/assets/hive/build_simulators.sh   + - name: Load cached Docker images + if: steps.cache-hive.outputs.cache-hit == 'true' + run: | + cd hive_assets + for tar_file in *.tar; do + if [ -f "$tar_file" ]; then + echo "Loading $tar_file..." + docker load -i "$tar_file" + fi + done + # Make hive binary executable + chmod +x hive + - name: Upload hive assets uses: actions/upload-artifact@v4 with: @@ -128,6 +152,8 @@ - sim: ethereum/eest/consume-engine limit: .*tests/homestead.* - sim: ethereum/eest/consume-engine limit: .*tests/frontier.* + - sim: ethereum/eest/consume-engine + limit: .*tests/paris.*   # consume-rlp - sim: ethereum/eest/consume-rlp @@ -146,12 +172,13 @@ - sim: ethereum/eest/consume-rlp limit: .*tests/homestead.* - sim: ethereum/eest/consume-rlp limit: .*tests/frontier.* + - sim: ethereum/eest/consume-rlp + limit: .*tests/paris.* needs: - prepare-reth - prepare-hive name: run ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }} - runs-on: - group: Reth + runs-on: ubuntu-latest permissions: issues: write steps: @@ -218,8 +245,7 @@ cat hivetests/workspace/logs/reth/client-*.log notify-on-error: needs: test if: failure() - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - name: Slack Webhook Action uses: rtCamp/action-slack-notify@v2
diff --git reth/.github/workflows/integration.yml scroll-reth/.github/workflows/integration.yml index 90e3287917e19cb453f8cba3a6b4fa69929fc310..7b94dd28e10d4f35f89c876d1ee65cbf588932d8 100644 --- reth/.github/workflows/integration.yml +++ scroll-reth/.github/workflows/integration.yml @@ -6,10 +6,7 @@ on: pull_request: merge_group: push: - branches: [main] - schedule: - # Run once a day at 3:00 UTC - - cron: "0 3 * * *" + branches: [main, scroll]   env: CARGO_TERM_COLOR: always @@ -22,9 +19,7 @@ jobs: test: name: test / ${{ matrix.network }} - if: github.event_name != 'schedule' - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 strategy: @@ -57,7 +52,7 @@ integration-success: name: integration success runs-on: ubuntu-latest - if: always() && github.event_name != 'schedule' + if: always() needs: [test] timeout-minutes: 30 steps:
diff --git reth/.github/workflows/lint.yml scroll-reth/.github/workflows/lint.yml index 309a25218b71acba33e307cba34c0b0c2974dbc1..8491eaf494d47029f2d2ef584132bb9a0f3a99d1 100644 --- reth/.github/workflows/lint.yml +++ scroll-reth/.github/workflows/lint.yml @@ -4,7 +4,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [main, scroll]   env: CARGO_TERM_COLOR: always @@ -20,6 +20,9 @@ include: - type: ethereum args: --workspace --lib --examples --tests --benches --locked features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + - type: scroll + args: --bin scroll-reth --workspace --lib --examples --tests --benches --locked + features: "asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" steps: - uses: actions/checkout@v5 - uses: rui314/setup-mold@v1 @@ -93,7 +96,7 @@ run: .github/assets/check_rv32imac.sh   crate-checks: runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 45 steps: - uses: actions/checkout@v5 - uses: rui314/setup-mold@v1 @@ -103,6 +106,7 @@ - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - run: cargo hack check --workspace + - run: cargo check -p scroll-reth   msrv: name: MSRV @@ -230,7 +234,7 @@ # Checks that selected rates can compile with power set of features features: name: features runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 60 steps: - uses: actions/checkout@v5 - uses: rui314/setup-mold@v1 @@ -262,6 +266,23 @@ deny: uses: ithacaxyz/ci/.github/workflows/deny.yml@main   + openvm: + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2025-08-18 + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo install --locked --git https://github.com/openvm-org/openvm.git --tag v1.4.2 cargo-openvm + - name: verify openvm compatibility + env: + OPENVM_RUST_TOOLCHAIN: nightly-2025-08-18 + run: cargo openvm build --manifest-path crates/scroll/openvm-compat/Cargo.toml + lint-success: name: lint success runs-on: ubuntu-latest @@ -281,6 +302,7 @@ - no-test-deps - features - feature-propagation - deny + - openvm timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed
diff --git reth/.github/workflows/pages.yaml scroll-reth/.github/workflows/pages.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5717d4c95543c09d59a7d51803826ead2641a5c5 --- /dev/null +++ scroll-reth/.github/workflows/pages.yaml @@ -0,0 +1,33 @@ +name: Build and publish forkdiff github-pages +permissions: + contents: write +on: + push: + branches: + - scroll +jobs: + deploy: + concurrency: ci-${{ github.ref }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1000 # make sure to fetch the old commit we diff against + + - name: Build forkdiff + uses: "docker://protolambda/forkdiff:0.1.0" + with: + args: -repo=/github/workspace -fork=/github/workspace/fork.yaml -out=/github/workspace/index.html + + - name: Build pages + run: | + mkdir -p tmp/pages + mv index.html tmp/pages/index.html + touch tmp/pages/.nojekyll + + - name: Deploy + uses: JamesIves/github-pages-deploy-action@v4 + with: + folder: tmp/pages + clean: true \ No newline at end of file
diff --git reth/.github/workflows/prepare-reth.yml scroll-reth/.github/workflows/prepare-reth.yml index 37a9445af721dda343b98838e8e385f358697eae..6334297d7af6dcbc53b04df2986ef4014c299bc3 100644 --- reth/.github/workflows/prepare-reth.yml +++ scroll-reth/.github/workflows/prepare-reth.yml @@ -26,8 +26,7 @@ jobs: prepare-reth: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - run: mkdir artifacts
diff --git reth/.github/workflows/release.yml scroll-reth/.github/workflows/release.yml index 4b637889d2a129e7d99cbfb01a5db8d749381796..ba201679b5aa6953b8b673d564a97970582a3a58 100644 --- reth/.github/workflows/release.yml +++ scroll-reth/.github/workflows/release.yml @@ -18,7 +18,6 @@ env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth @@ -73,10 +72,6 @@ configs: - target: x86_64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf - allow_fail: false - - target: x86_64-unknown-linux-gnu - os: ubuntu-24.04 - profile: reproducible allow_fail: false - target: aarch64-unknown-linux-gnu os: ubuntu-24.04 @@ -124,13 +119,7 @@ echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV   - name: Build Reth - if: ${{ !(matrix.build.binary == 'op-reth' && matrix.configs.profile == 'reproducible') }} - run: | - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - make build-reth-reproducible - else - make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - fi + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }}   - name: Build Reth deb package if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} @@ -140,13 +129,6 @@ - name: Move binary run: | mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - - # Handle reproducible builds which always target x86_64-unknown-linux-gnu - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - mv "target/x86_64-unknown-linux-gnu/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - else - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - fi   # Move deb packages if they exist if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then @@ -329,4 +311,4 @@ echo "- Docker images would be pushed to registry" echo "- A draft release would be created" echo "" echo "### Next Steps" - echo "To perform a real release, push a git tag." + echo "To perform a real release, push a git tag." \ No newline at end of file
diff --git reth/.github/workflows/stage.yml scroll-reth/.github/workflows/stage.yml index 7225d84cffaf06703c877963ccd3ac928d67e7a2..c51e305f60f4e86387613c42645f7993f9594928 100644 --- reth/.github/workflows/stage.yml +++ scroll-reth/.github/workflows/stage.yml @@ -6,7 +6,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [ main, scroll ]   env: CARGO_TERM_COLOR: always @@ -22,8 +22,7 @@ stage: name: stage-run-test # Only run stage commands test in merge groups if: github.event_name == 'merge_group' - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1
diff --git reth/.github/workflows/stale.yml scroll-reth/.github/workflows/stale.yml index 297339f53e627e0f2a522938b03666e676875238..66bd6a5b77759bfa3404b651eb6843e1536137c4 100644 --- reth/.github/workflows/stale.yml +++ scroll-reth/.github/workflows/stale.yml @@ -4,8 +4,6 @@ name: stale issues   on: workflow_dispatch: {} - schedule: - - cron: "30 1 * * *"   jobs: close-issues:
diff --git reth/.github/workflows/sync-era.yml scroll-reth/.github/workflows/sync-era.yml index f2539b2fdc2ea72be76cf7201ae0f48b5c85246d..ffd5cdaeaf879b76ebd66c206dae42309259ed0e 100644 --- reth/.github/workflows/sync-era.yml +++ scroll-reth/.github/workflows/sync-era.yml @@ -17,8 +17,7 @@ jobs: sync: name: sync (${{ matrix.chain.bin }}) - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -64,4 +63,4 @@ run: | ${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }} - name: Run stage unwind to block hash run: | - ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }} + ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
diff --git reth/.github/workflows/sync.yml scroll-reth/.github/workflows/sync.yml index e57082b83e71ea3354aefde78b1b173f821a9c21..8014a9da5537cff71058ebcca8fb10cb841e2a84 100644 --- reth/.github/workflows/sync.yml +++ scroll-reth/.github/workflows/sync.yml @@ -3,9 +3,9 @@ name: sync test   on: - workflow_dispatch: - schedule: - - cron: "0 */6 * * *" + merge_group: + push: + branches: [main, scroll]   env: CARGO_TERM_COLOR: always @@ -17,8 +17,7 @@ jobs: sync: name: sync (${{ matrix.chain.bin }}) - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -38,6 +37,12 @@ chain: base tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7" block: 10000 unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" + - build: install-scroll + bin: scroll-reth + chain: scroll-mainnet + tip: "0x1f398ce1e03b9d7d7fcba8512dc43c9f84ecaffb15954ab178fab48151e84484" + block: 50000 + unwind-target: "0xc434910471ff41b3f097360b8c5d20459018023833081192dbe490a12ae2937f" steps: - uses: actions/checkout@v5 - uses: rui314/setup-mold@v1 @@ -53,6 +58,7 @@ ${{ matrix.chain.bin }} node \ --chain ${{ matrix.chain.chain }} \ --debug.tip ${{ matrix.chain.tip }} \ --debug.max-block ${{ matrix.chain.block }} \ + --builder.gaslimit 20000000 \ --debug.terminate - name: Verify the target block hash run: |
diff --git reth/.github/workflows/unit.yml scroll-reth/.github/workflows/unit.yml index d9aca93f21c0cbd8396249af76cddd6d7abd4cf4..e3c5817df1a02e53029632ac05872f683922fd7e 100644 --- reth/.github/workflows/unit.yml +++ scroll-reth/.github/workflows/unit.yml @@ -6,7 +6,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [main, scroll]   env: CARGO_TERM_COLOR: always @@ -20,7 +20,7 @@ jobs: test: name: test / ${{ matrix.type }} (${{ matrix.partition }}/${{ matrix.total_partitions }}) runs-on: - group: Reth + group: scroll-reth-runner-group env: RUST_BACKTRACE: 1 strategy: @@ -42,8 +42,14 @@ - type: optimism args: --features "asm-keccak" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 2 total_partitions: 2 + - type: scroll + args: -p "reth-scroll-*" -p "scroll-alloy-*" --locked + partition: 1 + total_partitions: 1 timeout-minutes: 30 steps: + - name: Free up disk space + run: rm -rf /opt/hostedtoolcache - uses: actions/checkout@v5 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable @@ -60,18 +66,19 @@ run: | cargo nextest run \ ${{ matrix.args }} --workspace \ --exclude ef-tests --no-tests=warn \ - --partition hash:${{ matrix.partition }}/2 \ + --partition hash:${{ matrix.partition }}/${{ matrix.total_partitions }} \ -E "!kind(test) and not binary(e2e_testsuite)"   state: name: Ethereum state tests - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 timeout-minutes: 30 steps: + - name: Free up disk space + run: rm -rf /opt/hostedtoolcache - uses: actions/checkout@v5 - name: Checkout ethereum/tests uses: actions/checkout@v5 @@ -100,8 +107,7 @@ - run: cargo nextest run --release -p ef-tests --features "asm-keccak ef-tests"   doc: name: doc tests - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 timeout-minutes: 30
diff --git reth/fork.yaml scroll-reth/fork.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f82ca15ac97fdd8615509173a659245febeb746 --- /dev/null +++ scroll-reth/fork.yaml @@ -0,0 +1,42 @@ +title: "scroll-reth - reth fork diff overview" +footer: | + Fork-diff overview of [`scroll-reth`](https://github.com/scroll-tech/reth), a fork of [`reth`](https://github.com/paradigmxyz/reth). +base: + name: reth + url: https://github.com/paradigmxyz/reth + hash: e9598ba5ac4e32600e48b93d197a25603b1c644b +fork: + name: scroll-reth + url: https://github.com/scroll-tech/reth + ref: refs/heads/scroll +def: + title: "scroll-reth" + description: | + This is an overview of the changes in [`scroll-reth`](https://github.com/scroll-tech/reth), + a fork of [`reth`](https://github.com/paradigmxyz/reth). + sub: + - title: "crates/scroll" + globs: + - "crates/scroll/**/*" + - title: "crates/ethereum" + globs: + - "crates/ethereum/**/*" + - title: "crates/optimism" + globs: + - "crates/optimism/**/*" + - title: "crates/primitives" + globs: + - "crates/primitives*/**/*" + - title: "crates/stages" + globs: + - "crates/stages/**/*" + - title: "crates/storage" + globs: + - "crates/storage/**/*" + - title: "crates/trie" + globs: + - "crates/trie/**/*" + +ignore: + - "fork.yaml" + - ".github/**"