From 69c4881c0b288de87ac89f1645bec8a3a05d6f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Mon, 2 Feb 2026 16:51:59 -0300 Subject: [PATCH 01/20] feat: add checkpoint-sync support --- Cargo.lock | 101 ++++++++++ Cargo.toml | 1 + bin/ethlambda/Cargo.toml | 2 + bin/ethlambda/src/checkpoint_sync.rs | 283 +++++++++++++++++++++++++++ bin/ethlambda/src/main.rs | 45 ++++- 5 files changed, 430 insertions(+), 2 deletions(-) create mode 100644 bin/ethlambda/src/checkpoint_sync.rs diff --git a/Cargo.lock b/Cargo.lock index 88a3df4..285c139 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1946,11 +1946,13 @@ dependencies = [ "ethlambda-storage", "ethlambda-types", "hex", + "reqwest", "serde", "serde_json", "serde_yaml_ng", "spawned-concurrency", "spawned-rt", + "thiserror 2.0.17", "tokio", "tracing", "tracing-subscriber", @@ -2956,12 +2958,30 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.5", +] + [[package]] name = "hyper-util" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ + "base64", "bytes", "futures-channel", "futures-core", @@ -2969,7 +2989,9 @@ dependencies = [ "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2 0.6.1", "tokio", @@ -3225,6 +3247,16 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.2" @@ -5972,6 +6004,44 @@ dependencies = [ "bytecheck", ] +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 1.0.5", +] + [[package]] name = "resolv-conf" version = "0.7.6" @@ -6811,6 +6881,9 @@ name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -7046,6 +7119,16 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.18" @@ -7118,6 +7201,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index e925252..5ad3086 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,3 +69,4 @@ vergen-git2 = { version = "9", features = ["rustc"] } rand = "0.9" rocksdb = "0.24" +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } diff --git a/bin/ethlambda/Cargo.toml b/bin/ethlambda/Cargo.toml index ef20137..2f115b2 100644 --- a/bin/ethlambda/Cargo.toml +++ b/bin/ethlambda/Cargo.toml @@ -24,6 +24,8 @@ serde_yaml_ng = "0.10" hex.workspace = true clap.workspace = true +reqwest.workspace = true +thiserror.workspace = true [build-dependencies] vergen-git2.workspace = true diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs new file mode 100644 index 0000000..3e25a9c --- /dev/null +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -0,0 +1,283 @@ +use std::time::Duration; + +use ethlambda_types::block::{Block, BlockBody}; +use ethlambda_types::primitives::Decode; +use ethlambda_types::state::{State, Validator}; +use reqwest::Client; + +const CHECKPOINT_TIMEOUT: Duration = Duration::from_secs(60); +const MAX_STATE_SIZE: u64 = 100 * 1024 * 1024; // 100 MB limit + +#[derive(Debug, thiserror::Error)] +pub enum CheckpointSyncError { + #[error("HTTP request failed: {0}")] + Http(#[from] reqwest::Error), + #[error("SSZ deserialization failed: {0}")] + Ssz(String), + #[error("Verification failed: {0}")] + Verification(String), +} + +/// Fetch finalized state from checkpoint sync URL. +pub async fn fetch_checkpoint_state(base_url: &str) -> Result { + let url = format!( + "{}/lean/v0/states/finalized", + base_url.trim_end_matches('/') + ); + let client = Client::builder().timeout(CHECKPOINT_TIMEOUT).build()?; + + let response = client + .get(&url) + .header("Accept", "application/octet-stream") + .send() + .await? + .error_for_status()?; + + // DoS protection: Check Content-Length before reading + if let Some(content_length) = response.content_length() + && content_length > MAX_STATE_SIZE + { + return Err(CheckpointSyncError::Verification(format!( + "state too large: {} bytes (max {})", + content_length, MAX_STATE_SIZE + ))); + } + + let bytes = response.bytes().await?; + if bytes.len() as u64 > MAX_STATE_SIZE { + return Err(CheckpointSyncError::Verification( + "state exceeds size limit".into(), + )); + } + + State::from_ssz_bytes(&bytes).map_err(|e| CheckpointSyncError::Ssz(format!("{:?}", e))) +} + +/// Verify checkpoint state is structurally valid. 
+/// +/// Arguments: +/// - state: The downloaded checkpoint state +/// - expected_genesis_time: Genesis time from local config +/// - expected_validators: Validator pubkeys from local genesis config +pub fn verify_checkpoint_state( + state: &State, + expected_genesis_time: u64, + expected_validators: &[Validator], +) -> Result<(), CheckpointSyncError> { + // Slot sanity check + if state.slot == 0 { + return Err(CheckpointSyncError::Verification("slot cannot be 0".into())); + } + + // Validators exist + if state.validators.is_empty() { + return Err(CheckpointSyncError::Verification("no validators".into())); + } + + // Genesis time matches + if state.config.genesis_time != expected_genesis_time { + return Err(CheckpointSyncError::Verification(format!( + "genesis time mismatch: expected {}, got {}", + expected_genesis_time, state.config.genesis_time + ))); + } + + // Validator count matches + if state.validators.len() != expected_validators.len() { + return Err(CheckpointSyncError::Verification(format!( + "validator count mismatch: expected {}, got {}", + expected_validators.len(), + state.validators.len() + ))); + } + + // Validator pubkeys match (critical security check) + for (i, (state_val, expected_val)) in state + .validators + .iter() + .zip(expected_validators.iter()) + .enumerate() + { + if state_val.pubkey != expected_val.pubkey { + return Err(CheckpointSyncError::Verification(format!( + "validator {} pubkey mismatch", + i + ))); + } + } + + // Finalized slot sanity + if state.latest_finalized.slot > state.slot { + return Err(CheckpointSyncError::Verification( + "finalized slot cannot exceed state slot".into(), + )); + } + + // Justified must be at or after finalized + if state.latest_justified.slot < state.latest_finalized.slot { + return Err(CheckpointSyncError::Verification( + "justified slot cannot precede finalized slot".into(), + )); + } + + // Block header slot consistency + if state.latest_block_header.slot > state.slot { + return Err(CheckpointSyncError::Verification( + "block header slot exceeds state slot".into(), + )); + } + + Ok(()) +} + +/// Construct anchor block from checkpoint state. +/// +/// IMPORTANT: This creates a block with default body. The block's tree_hash_root() +/// will only match the original block if the original also had an empty body. +/// For most checkpoint states, this is acceptable because fork choice uses the +/// anchor checkpoint, not individual block lookups. 
+pub fn construct_anchor_block(state: &State) -> Block { + Block { + slot: state.latest_block_header.slot, + parent_root: state.latest_block_header.parent_root, + proposer_index: state.latest_block_header.proposer_index, + state_root: state.latest_block_header.state_root, + body: BlockBody::default(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ethlambda_types::block::BlockHeader; + use ethlambda_types::primitives::VariableList; + use ethlambda_types::state::{ChainConfig, Checkpoint}; + + // Helper to create valid test state + fn create_test_state(slot: u64, validators: Vec<Validator>, genesis_time: u64) -> State { + use ethlambda_types::primitives::H256; + use ethlambda_types::state::{JustificationValidators, JustifiedSlots}; + + State { + slot, + validators: VariableList::new(validators).unwrap(), + latest_block_header: BlockHeader { + slot, + parent_root: H256::ZERO, + state_root: H256::ZERO, + body_root: H256::ZERO, + proposer_index: 0, + }, + latest_justified: Checkpoint { + slot: slot.saturating_sub(10), + root: H256::ZERO, + }, + latest_finalized: Checkpoint { + slot: slot.saturating_sub(20), + root: H256::ZERO, + }, + config: ChainConfig { genesis_time }, + historical_block_hashes: Default::default(), + justified_slots: JustifiedSlots::with_capacity(0).unwrap(), + justifications_roots: Default::default(), + justifications_validators: JustificationValidators::with_capacity(0).unwrap(), + } + } + + fn create_test_validator() -> Validator { + Validator { + pubkey: [1u8; 52], + index: 0, + } + } + + fn create_different_validator() -> Validator { + Validator { + pubkey: [2u8; 52], + index: 0, + } + } + + #[test] + fn verify_accepts_valid_state() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + } + + #[test] + fn verify_rejects_slot_zero() { + let validators = vec![create_test_validator()]; + let state = create_test_state(0, validators.clone(), 1000); + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + + #[test] + fn verify_rejects_empty_validators() { + let state = create_test_state(100, vec![], 1000); + assert!(verify_checkpoint_state(&state, 1000, &[]).is_err()); + } + + #[test] + fn verify_rejects_genesis_time_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + // State has genesis_time=1000, we pass expected=9999 + assert!(verify_checkpoint_state(&state, 9999, &validators).is_err()); + } + + #[test] + fn verify_rejects_validator_count_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + let extra_validators = vec![create_test_validator(), create_test_validator()]; + assert!(verify_checkpoint_state(&state, 1000, &extra_validators).is_err()); + } + + #[test] + fn verify_rejects_validator_pubkey_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + let different_validators = vec![create_different_validator()]; + assert!(verify_checkpoint_state(&state, 1000, &different_validators).is_err()); + } + + #[test] + fn verify_rejects_finalized_after_state_slot() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 101; // Finalized after state slot + assert!(verify_checkpoint_state(&state, 1000,
&validators).is_err()); + } + + #[test] + fn verify_rejects_justified_before_finalized() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 50; + state.latest_justified.slot = 40; // Justified before finalized + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + + #[test] + fn verify_rejects_block_header_slot_exceeds_state() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 101; // Block header slot exceeds state slot + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + + #[test] + fn construct_anchor_block_copies_header_fields() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators, 1000); + let block = construct_anchor_block(&state); + assert_eq!(block.slot, state.latest_block_header.slot); + assert_eq!(block.parent_root, state.latest_block_header.parent_root); + assert_eq!( + block.proposer_index, + state.latest_block_header.proposer_index + ); + assert_eq!(block.state_root, state.latest_block_header.state_root); + } +} diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index aa438dc..199b364 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -1,3 +1,4 @@ +mod checkpoint_sync; mod version; use std::{ @@ -46,6 +47,10 @@ struct CliOptions { /// The node ID to look up in annotated_validators.yaml (e.g., "ethlambda_0") #[arg(long)] node_id: String, + /// URL of a peer to download checkpoint state from (e.g., http://peer:5052) + /// When set, skips genesis initialization and syncs from checkpoint. + #[arg(long)] + checkpoint_sync_url: Option<String>, } #[tokio::main] async fn main() { @@ -93,9 +98,45 @@ async fn main() { let validator_keys = read_validator_keys(&validators_path, &validator_keys_dir, &options.node_id); - let genesis_state = State::from_genesis(&genesis, validators); let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = Store::from_genesis(backend, genesis_state); + + let store = if let Some(checkpoint_url) = &options.checkpoint_sync_url { + // Checkpoint sync path + info!(%checkpoint_url, "Starting checkpoint sync"); + + let state = match checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await { + Ok(state) => state, + Err(e) => { + error!(%checkpoint_url, %e, "Checkpoint sync failed"); + std::process::exit(1); + } + }; + + // Verify against local genesis config + if let Err(e) = checkpoint_sync::verify_checkpoint_state( + &state, + genesis.config.genesis_time, + &validators, + ) { + error!(%e, "Checkpoint state verification failed"); + std::process::exit(1); + } + + let anchor_block = checkpoint_sync::construct_anchor_block(&state); + + info!( + slot = state.slot, + validators = state.validators.len(), + finalized_slot = state.latest_finalized.slot, + "Checkpoint sync complete" + ); + + Store::get_forkchoice_store(backend, state, anchor_block) + } else { + // Genesis path (existing code) + let genesis_state = State::from_genesis(&genesis, validators); + Store::from_genesis(backend, genesis_state) + }; let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); From 02cba0a161b8e89b7c941c1ff7d75fdd339af437 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb
2026 16:47:23 -0300 Subject: [PATCH 02/20] fix: update import path --- bin/ethlambda/src/checkpoint_sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index 3e25a9c..8cedb82 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -1,7 +1,7 @@ use std::time::Duration; use ethlambda_types::block::{Block, BlockBody}; -use ethlambda_types::primitives::Decode; +use ethlambda_types::primitives::ssz::Decode; use ethlambda_types::state::{State, Validator}; use reqwest::Client; From 4245026a0222096e9faa8b5eb541590cc05fdff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 17:46:35 -0300 Subject: [PATCH 03/20] feat: use read and connect timeouts instead of e2e --- bin/ethlambda/src/checkpoint_sync.rs | 33 ++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index 8cedb82..1929375 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -5,8 +5,13 @@ use ethlambda_types::primitives::ssz::Decode; use ethlambda_types::state::{State, Validator}; use reqwest::Client; -const CHECKPOINT_TIMEOUT: Duration = Duration::from_secs(60); -const MAX_STATE_SIZE: u64 = 100 * 1024 * 1024; // 100 MB limit +/// Timeout for establishing the HTTP connection to the checkpoint peer. +/// Fail fast if the peer is unreachable. +const CHECKPOINT_CONNECT_TIMEOUT: Duration = Duration::from_secs(15); + +/// Timeout for reading data during body download. +/// This is an inactivity timeout - it resets on each successful read. +const CHECKPOINT_READ_TIMEOUT: Duration = Duration::from_secs(15); #[derive(Debug, thiserror::Error)] pub enum CheckpointSyncError { @@ -19,12 +24,26 @@ pub enum CheckpointSyncError { } /// Fetch finalized state from checkpoint sync URL. +/// +/// Uses two-phase timeout strategy: +/// - Connect timeout (15s): Fails quickly if peer is unreachable +/// - Read timeout (15s): Inactivity timeout that resets on each read +/// +/// Note: We use a read timeout (via `.read_timeout()`) instead of a total download +/// timeout to automatically detect stalled downloads. This allows large states +/// to be downloaded successfully as long as data keeps flowing, while still +/// failing fast if the connection stalls. A plain total timeout would +/// disconnect even for valid downloads if the state is simply too large to +/// transfer within the time limit. pub async fn fetch_checkpoint_state(base_url: &str) -> Result { - let url = format!( - "{}/lean/v0/states/finalized", - base_url.trim_end_matches('/') - ); - let client = Client::builder().timeout(CHECKPOINT_TIMEOUT).build()?; + let base_url = base_url.trim_end_matches('/'); + let url = format!("{base_url}/lean/v0/states/finalized"); + // Use .read_timeout() to detect stalled downloads (inactivity timer). + // This allows large states to complete as long as data keeps flowing. 
+ let client = Client::builder() + .connect_timeout(CHECKPOINT_CONNECT_TIMEOUT) + .read_timeout(CHECKPOINT_READ_TIMEOUT) + .build()?; let response = client .get(&url) From 7333108c151fba499a97b621fbcf0e6195986698 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 17:47:04 -0300 Subject: [PATCH 04/20] fix: remove download size limit --- bin/ethlambda/src/checkpoint_sync.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index 1929375..863767b 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -1,7 +1,7 @@ use std::time::Duration; use ethlambda_types::block::{Block, BlockBody}; -use ethlambda_types::primitives::ssz::Decode; +use ethlambda_types::primitives::ssz::{Decode, TreeHash}; use ethlambda_types::state::{State, Validator}; use reqwest::Client; @@ -52,23 +52,7 @@ pub async fn fetch_checkpoint_state(base_url: &str) -> Result MAX_STATE_SIZE - { - return Err(CheckpointSyncError::Verification(format!( - "state too large: {} bytes (max {})", - content_length, MAX_STATE_SIZE - ))); - } - let bytes = response.bytes().await?; - if bytes.len() as u64 > MAX_STATE_SIZE { - return Err(CheckpointSyncError::Verification( - "state exceeds size limit".into(), - )); - } - State::from_ssz_bytes(&bytes).map_err(|e| CheckpointSyncError::Ssz(format!("{:?}", e))) } From 43d888217a6e0b81363eee7181eea56d7630b6b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 17:47:32 -0300 Subject: [PATCH 05/20] feat: add additional checks --- bin/ethlambda/src/checkpoint_sync.rs | 143 ++++++++++++++++++++++++++- 1 file changed, 142 insertions(+), 1 deletion(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index 863767b..f25c4d5 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -94,6 +94,16 @@ pub fn verify_checkpoint_state( ))); } + // Validator indices are sequential (0, 1, 2, ...) 
+ for (position, validator) in state.validators.iter().enumerate() { + if validator.index != position as u64 { + return Err(CheckpointSyncError::Verification(format!( + "validator at position {} has index {} (expected {})", + position, validator.index, position + ))); + } + } + // Validator pubkeys match (critical security check) for (i, (state_val, expected_val)) in state .validators .iter() .zip(expected_validators.iter()) .enumerate() @@ -123,6 +133,15 @@ pub fn verify_checkpoint_state( )); } + // If justified and finalized are at same slot, roots must match + if state.latest_justified.slot == state.latest_finalized.slot + && state.latest_justified.root != state.latest_finalized.root + { + return Err(CheckpointSyncError::Verification( + "justified and finalized at same slot must have matching roots".into(), + )); + } + // Block header slot consistency if state.latest_block_header.slot > state.slot { return Err(CheckpointSyncError::Verification( @@ -130,6 +149,25 @@ pub fn verify_checkpoint_state( )); } + // If block header matches checkpoint slots, roots must match + let block_root = state.latest_block_header.tree_hash_root(); + + if state.latest_block_header.slot == state.latest_finalized.slot + && block_root != state.latest_finalized.root.0 + { + return Err(CheckpointSyncError::Verification( + "block header at finalized slot must match finalized root".into(), + )); + } + + if state.latest_block_header.slot == state.latest_justified.slot + && block_root != state.latest_justified.root.0 + { + return Err(CheckpointSyncError::Verification( + "block header at justified slot must match justified root".into(), + )); + } + Ok(()) } @@ -201,6 +239,15 @@ mod tests { } } + fn create_validators_with_indices(count: usize) -> Vec<Validator> { + (0..count) + .map(|i| Validator { + pubkey: [i as u8 + 1; 52], + index: i as u64, + }) + .collect() + } + #[test] fn verify_accepts_valid_state() { let validators = vec![create_test_validator()]; @@ -233,10 +280,35 @@ mod tests { fn verify_rejects_validator_count_mismatch() { let validators = vec![create_test_validator()]; let state = create_test_state(100, validators.clone(), 1000); - let extra_validators = vec![create_test_validator(), create_test_validator()]; + let extra_validators = create_validators_with_indices(2); assert!(verify_checkpoint_state(&state, 1000, &extra_validators).is_err()); } + #[test] + fn verify_accepts_multiple_validators_with_sequential_indices() { + let validators = create_validators_with_indices(3); + let state = create_test_state(100, validators.clone(), 1000); + assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + } + + #[test] + fn verify_rejects_non_sequential_validator_indices() { + let mut validators = create_validators_with_indices(3); + validators[1].index = 5; // Wrong index at position 1 + let state = create_test_state(100, validators.clone(), 1000); + let expected_validators = create_validators_with_indices(3); + assert!(verify_checkpoint_state(&state, 1000, &expected_validators).is_err()); + } + + #[test] + fn verify_rejects_duplicate_validator_indices() { + let mut validators = create_validators_with_indices(3); + validators[2].index = 0; // Duplicate index + let state = create_test_state(100, validators.clone(), 1000); + let expected_validators = create_validators_with_indices(3); + assert!(verify_checkpoint_state(&state, 1000, &expected_validators).is_err()); + } + #[test] fn verify_rejects_validator_pubkey_mismatch() { let validators = vec![create_test_validator()]; let state = create_test_state(100, validators.clone(), 1000); let different_validators = vec![create_different_validator()]; assert!(verify_checkpoint_state(&state, 1000, &different_validators).is_err()); } @@ -262,6 +334,31 @@ mod tests { assert!(verify_checkpoint_state(&state, 1000,
&validators).is_err()); } + #[test] + fn verify_accepts_justified_equals_finalized_with_matching_roots() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + let common_root = H256::from([42u8; 32]); + state.latest_finalized.slot = 50; + state.latest_finalized.root = common_root; + state.latest_justified.slot = 50; // Same slot + state.latest_justified.root = common_root; // Same root + assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + } + + #[test] + fn verify_rejects_justified_equals_finalized_with_different_roots() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 50; + state.latest_finalized.root = H256::from([1u8; 32]); + state.latest_justified.slot = 50; // Same slot + state.latest_justified.root = H256::from([2u8; 32]); // Different root - conflict! + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + #[test] fn verify_rejects_block_header_slot_exceeds_state() { let validators = vec![create_test_validator()]; @@ -270,6 +367,50 @@ mod tests { assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); } + #[test] + fn verify_accepts_block_header_matches_finalized_with_correct_root() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 50; + let block_root = state.latest_block_header.tree_hash_root(); + state.latest_finalized.slot = 50; + state.latest_finalized.root = block_root; + assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + } + + #[test] + fn verify_rejects_block_header_matches_finalized_with_wrong_root() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 50; + state.latest_finalized.slot = 50; + state.latest_finalized.root = H256::from([99u8; 32]); // Wrong root + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + + #[test] + fn verify_accepts_block_header_matches_justified_with_correct_root() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 90; + let block_root = state.latest_block_header.tree_hash_root(); + state.latest_justified.slot = 90; + state.latest_justified.root = block_root; + assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + } + + #[test] + fn verify_rejects_block_header_matches_justified_with_wrong_root() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 90; + state.latest_justified.slot = 90; + state.latest_justified.root = H256::from([99u8; 32]); // Wrong root + assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + } + #[test] fn construct_anchor_block_copies_header_fields() { let validators = vec![create_test_validator()]; From 38a4b3132583f4c747575608c3ae036a0d2f022a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 17:51:31 -0300 Subject: [PATCH 06/20] refactor: clean up the main entrypoint --- 
bin/ethlambda/src/main.rs | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index 199b364..a2876f0 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -104,21 +104,20 @@ async fn main() { // Checkpoint sync path info!(%checkpoint_url, "Starting checkpoint sync"); - let state = match checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await { - Ok(state) => state, - Err(e) => { - error!(%checkpoint_url, %e, "Checkpoint sync failed"); - std::process::exit(1); - } + let Ok(state) = checkpoint_sync::fetch_checkpoint_state(checkpoint_url) + .await + .inspect_err(|err| error!(%checkpoint_url, %err, "Checkpoint sync failed")) + else { + std::process::exit(1); }; + let genesis_time = genesis.config.genesis_time; + // Verify against local genesis config - if let Err(e) = checkpoint_sync::verify_checkpoint_state( - &state, - genesis.config.genesis_time, - &validators, - ) { - error!(%e, "Checkpoint state verification failed"); + if let Err(err) = + checkpoint_sync::verify_checkpoint_state(&state, genesis_time, &validators) + { + error!(%err, "Checkpoint state verification failed"); std::process::exit(1); } @@ -133,7 +132,6 @@ async fn main() { Store::get_forkchoice_store(backend, state, anchor_block) } else { - // Genesis path (existing code) let genesis_state = State::from_genesis(&genesis, validators); Store::from_genesis(backend, genesis_state) }; From 7fd1ed27c677fcb774ac355c17a039931757cb6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 18:11:13 -0300 Subject: [PATCH 07/20] refactor: move big if to helper --- bin/ethlambda/src/checkpoint_sync.rs | 14 ---- bin/ethlambda/src/main.rs | 96 +++++++++++++++++----------- 2 files changed, 60 insertions(+), 50 deletions(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index f25c4d5..c439b95 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -410,18 +410,4 @@ mod tests { state.latest_justified.root = H256::from([99u8; 32]); // Wrong root assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); } - - #[test] - fn construct_anchor_block_copies_header_fields() { - let validators = vec![create_test_validator()]; - let state = create_test_state(100, validators, 1000); - let block = construct_anchor_block(&state); - assert_eq!(block.slot, state.latest_block_header.slot); - assert_eq!(block.parent_root, state.latest_block_header.parent_root); - assert_eq!( - block.proposer_index, - state.latest_block_header.proposer_index - ); - assert_eq!(block.state_root, state.latest_block_header.state_root); - } } diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index a2876f0..9504f62 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -21,7 +21,7 @@ use tracing::{error, info}; use tracing_subscriber::{EnvFilter, Layer, Registry, layer::SubscriberExt}; use ethlambda_blockchain::BlockChain; -use ethlambda_storage::{Store, backend::RocksDBBackend}; +use ethlambda_storage::{StorageBackend, Store, backend::RocksDBBackend}; const ASCII_ART: &str = r#" _ _ _ _ _ @@ -100,41 +100,15 @@ async fn main() { let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = if let Some(checkpoint_url) = &options.checkpoint_sync_url { - // Checkpoint sync path - info!(%checkpoint_url, "Starting 
checkpoint sync"); - - let Ok(state) = checkpoint_sync::fetch_checkpoint_state(checkpoint_url) - .await - .inspect_err(|err| error!(%checkpoint_url, %err, "Checkpoint sync failed")) - else { - std::process::exit(1); - }; - - let genesis_time = genesis.config.genesis_time; - - // Verify against local genesis config - if let Err(err) = - checkpoint_sync::verify_checkpoint_state(&state, genesis_time, &validators) - { - error!(%err, "Checkpoint state verification failed"); - std::process::exit(1); - } - - let anchor_block = checkpoint_sync::construct_anchor_block(&state); - - info!( - slot = state.slot, - validators = state.validators.len(), - finalized_slot = state.latest_finalized.slot, - "Checkpoint sync complete" - ); - - Store::get_forkchoice_store(backend, state, anchor_block) - } else { - let genesis_state = State::from_genesis(&genesis, validators); - Store::from_genesis(backend, genesis_state) - }; + let store = fetch_initial_state( + options.checkpoint_sync_url.as_deref(), + &genesis, + validators, + backend.clone(), + ) + .await + .inspect_err(|err| error!(%err, "Failed to initialize state")) + .unwrap_or_else(|_| std::process::exit(1)); let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); @@ -317,3 +291,53 @@ fn read_hex_file_bytes(path: impl AsRef) -> Vec { }; bytes } + +/// Fetch the initial state for the node. +/// +/// If `checkpoint_url` is provided, performs checkpoint sync by downloading +/// and verifying the finalized state from a remote peer. Otherwise, creates +/// a genesis state from the local genesis configuration. +/// +/// # Arguments +/// +/// * `checkpoint_url` - Optional URL to fetch checkpoint state from +/// * `genesis` - Genesis configuration (for genesis_time verification and genesis state creation) +/// * `validators` - Validator set (moved for genesis state creation) +/// * `backend` - Storage backend for Store creation +/// +/// # Returns +/// +/// `Ok(Store)` on success, or `Err(CheckpointSyncError)` if checkpoint sync fails. +/// Genesis path is infallible and always returns `Ok`. 
+async fn fetch_initial_state( + checkpoint_url: Option<&str>, + genesis: &Genesis, + validators: Vec<Validator>, + backend: Arc, +) -> Result<Store, CheckpointSyncError> { + let store = if let Some(checkpoint_url) = checkpoint_url { + // Checkpoint sync path + info!(%checkpoint_url, "Starting checkpoint sync"); + + let state = checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await?; + + // Verify against local genesis config + checkpoint_sync::verify_checkpoint_state(&state, genesis.config.genesis_time, &validators)?; + + let anchor_block = checkpoint_sync::construct_anchor_block(&state); + + info!( + slot = state.slot, + validators = state.validators.len(), + finalized_slot = state.latest_finalized.slot, + "Checkpoint sync complete" + ); + + Store::get_forkchoice_store(backend, state, anchor_block) + } else { + let genesis_state = State::from_genesis(genesis, validators); + Store::from_genesis(backend, genesis_state) + }; + + Ok(store) +} From 084985a85f4a243fa0ae456024fe5cdd68e20272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 18:44:32 -0300 Subject: [PATCH 08/20] refactor: use error types instead of strings --- bin/ethlambda/src/checkpoint_sync.rs | 100 +++++++++++++++------------ 1 file changed, 57 insertions(+), 43 deletions(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index c439b95..fb6eeb8 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -1,7 +1,7 @@ use std::time::Duration; use ethlambda_types::block::{Block, BlockBody}; -use ethlambda_types::primitives::ssz::{Decode, TreeHash}; +use ethlambda_types::primitives::ssz::{Decode, DecodeError, TreeHash}; use ethlambda_types::state::{State, Validator}; use reqwest::Client; @@ -17,10 +17,38 @@ pub enum CheckpointSyncError { #[error("HTTP request failed: {0}")] Http(#[from] reqwest::Error), - #[error("SSZ deserialization failed: {0}")] - Ssz(String), - #[error("Verification failed: {0}")] - Verification(String), + #[error("SSZ deserialization failed: {0:?}")] + SszDecode(DecodeError), + #[error("checkpoint state slot cannot be 0")] + SlotIsZero, + #[error("checkpoint state has no validators")] + NoValidators, + #[error("genesis time mismatch: expected {expected}, got {got}")] + GenesisTimeMismatch { expected: u64, got: u64 }, + #[error("validator count mismatch: expected {expected}, got {got}")] + ValidatorCountMismatch { expected: usize, got: usize }, + #[error( + "validator at position {position} has non-sequential index (expected {expected}, got {got})" + )] + NonSequentialValidatorIndex { + position: usize, + expected: u64, + got: u64, + }, + #[error("validator {index} pubkey mismatch")] + ValidatorPubkeyMismatch { index: usize }, + #[error("finalized slot cannot exceed state slot")] + FinalizedExceedsStateSlot, + #[error("justified slot cannot precede finalized slot")] + JustifiedPrecedesFinalized, + #[error("justified and finalized at same slot must have matching roots")] + JustifiedFinalizedRootMismatch, + #[error("block header slot exceeds state slot")] + BlockHeaderSlotExceedsState, + #[error("block header at finalized slot must match finalized root")] + BlockHeaderFinalizedRootMismatch, + #[error("block header at justified slot must match justified root")] + BlockHeaderJustifiedRootMismatch, } /// Fetch finalized state from checkpoint sync URL.
@@ -53,7 +81,8 @@ pub async fn fetch_checkpoint_state(base_url: &str) -> Result Result<(), CheckpointSyncError> { // Slot sanity check if state.slot == 0 { - return Err(CheckpointSyncError::Verification("slot cannot be 0".into())); + return Err(CheckpointSyncError::SlotIsZero); } // Validators exist if state.validators.is_empty() { - return Err(CheckpointSyncError::Verification("no validators".into())); + return Err(CheckpointSyncError::NoValidators); } // Genesis time matches if state.config.genesis_time != expected_genesis_time { - return Err(CheckpointSyncError::Verification(format!( - "genesis time mismatch: expected {}, got {}", - expected_genesis_time, state.config.genesis_time - ))); + return Err(CheckpointSyncError::GenesisTimeMismatch { + expected: expected_genesis_time, + got: state.config.genesis_time, + }); } // Validator count matches if state.validators.len() != expected_validators.len() { - return Err(CheckpointSyncError::Verification(format!( - "validator count mismatch: expected {}, got {}", - expected_validators.len(), - state.validators.len() - ))); + return Err(CheckpointSyncError::ValidatorCountMismatch { + expected: expected_validators.len(), + got: state.validators.len(), + }); } // Validator indices are sequential (0, 1, 2, ...) for (position, validator) in state.validators.iter().enumerate() { if validator.index != position as u64 { - return Err(CheckpointSyncError::Verification(format!( - "validator at position {} has index {} (expected {})", - position, validator.index, position - ))); + return Err(CheckpointSyncError::NonSequentialValidatorIndex { + position, + expected: position as u64, + got: validator.index, + }); } } @@ -112,41 +141,30 @@ pub fn verify_checkpoint_state( .enumerate() { if state_val.pubkey != expected_val.pubkey { - return Err(CheckpointSyncError::Verification(format!( - "validator {} pubkey mismatch", - i - ))); + return Err(CheckpointSyncError::ValidatorPubkeyMismatch { index: i }); } } // Finalized slot sanity if state.latest_finalized.slot > state.slot { - return Err(CheckpointSyncError::Verification( - "finalized slot cannot exceed state slot".into(), - )); + return Err(CheckpointSyncError::FinalizedExceedsStateSlot); } // Justified must be at or after finalized if state.latest_justified.slot < state.latest_finalized.slot { - return Err(CheckpointSyncError::Verification( - "justified slot cannot precede finalized slot".into(), - )); + return Err(CheckpointSyncError::JustifiedPrecedesFinalized); } // If justified and finalized are at same slot, roots must match if state.latest_justified.slot == state.latest_finalized.slot && state.latest_justified.root != state.latest_finalized.root { - return Err(CheckpointSyncError::Verification( - "justified and finalized at same slot must have matching roots".into(), - )); + return Err(CheckpointSyncError::JustifiedFinalizedRootMismatch); } // Block header slot consistency if state.latest_block_header.slot > state.slot { - return Err(CheckpointSyncError::Verification( - "block header slot exceeds state slot".into(), - )); + return Err(CheckpointSyncError::BlockHeaderSlotExceedsState); } // If block header matches checkpoint slots, roots must match @@ -155,17 +173,13 @@ pub fn verify_checkpoint_state( if state.latest_block_header.slot == state.latest_finalized.slot && block_root != state.latest_finalized.root.0 { - return Err(CheckpointSyncError::Verification( - "block header at finalized slot must match finalized root".into(), - )); + return Err(CheckpointSyncError::BlockHeaderFinalizedRootMismatch); } 
if state.latest_block_header.slot == state.latest_justified.slot && block_root != state.latest_justified.root.0 { - return Err(CheckpointSyncError::Verification( - "block header at justified slot must match justified root".into(), - )); + return Err(CheckpointSyncError::BlockHeaderJustifiedRootMismatch); } Ok(()) From bd7d7d089f534fcfc11680807b1d4508b51f8053 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Wed, 4 Feb 2026 18:49:08 -0300 Subject: [PATCH 09/20] docs: update lean clients list --- CLAUDE.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 98285e1..08348d6 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -313,3 +313,6 @@ cargo test -p ethlambda-blockchain --features skip-signature-verification --test - zeam (Zig): - ream (Rust): - qlean (C++): +- grandine (Rust): +- gean (Go): +- Lantern (C): From fdd2fd0d965b9bfca09097dccb824830ec3fbf2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 15:19:17 -0300 Subject: [PATCH 10/20] refactor: split blocks into headers and bodies --- crates/blockchain/src/store.rs | 60 +++++++-------- .../blockchain/tests/forkchoice_spectests.rs | 8 +- crates/common/types/src/block.rs | 32 ++++++++ crates/net/p2p/src/req_resp/handlers.rs | 2 +- crates/storage/src/api/tables.rs | 11 ++- crates/storage/src/backend/rocksdb.rs | 7 +- crates/storage/src/backend/tests.rs | 30 ++++---- crates/storage/src/store.rs | 77 ++++++++++++++----- 8 files changed, 151 insertions(+), 76 deletions(-) diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index c41af16..7e758d8 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -46,8 +46,8 @@ fn update_head(store: &mut Store) { store.update_checkpoints(ForkCheckpoints::head_only(new_head)); if old_head != new_head { - let old_slot = store.get_block(&old_head).map(|b| b.slot).unwrap_or(0); - let new_slot = store.get_block(&new_head).map(|b| b.slot).unwrap_or(0); + let old_slot = store.get_block_header(&old_head).map(|h| h.slot).unwrap_or(0); + let new_slot = store.get_block_header(&new_head).map(|h| h.slot).unwrap_or(0); let justified_slot = store.latest_justified().slot; let finalized_slot = store.latest_finalized().slot; info!( @@ -91,11 +91,11 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), let data = &attestation.data; // Availability Check - We cannot count a vote if we haven't seen the blocks involved. - let source_block = store - .get_block(&data.source.root) + let source_header = store + .get_block_header(&data.source.root) .ok_or(StoreError::UnknownSourceBlock(data.source.root))?; - let target_block = store - .get_block(&data.target.root) + let target_header = store + .get_block_header(&data.target.root) .ok_or(StoreError::UnknownTargetBlock(data.target.root))?; if !store.contains_block(&data.head.root) { @@ -108,16 +108,16 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), } // Consistency Check - Validate checkpoint slots match block slots. 
- if source_block.slot != data.source.slot { + if source_header.slot != data.source.slot { return Err(StoreError::SourceSlotMismatch { checkpoint_slot: data.source.slot, - block_slot: source_block.slot, + block_slot: source_header.slot, }); } - if target_block.slot != data.target.slot { + if target_header.slot != data.target.slot { return Err(StoreError::TargetSlotMismatch { checkpoint_slot: data.target.slot, - block_slot: target_block.slot, + block_slot: target_header.slot, }); } @@ -441,12 +441,12 @@ pub fn on_block( pub fn get_attestation_target(store: &Store) -> Checkpoint { // Start from current head let mut target_block_root = store.head(); - let mut target_block = store - .get_block(&target_block_root) + let mut target_header = store + .get_block_header(&target_block_root) .expect("head block exists"); let safe_target_block_slot = store - .get_block(&store.safe_target()) + .get_block_header(&store.safe_target()) .expect("safe target exists") .slot; @@ -455,10 +455,10 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // This ensures the target doesn't advance too far ahead of safe target, // providing a balance between liveness and safety. for _ in 0..JUSTIFICATION_LOOKBACK_SLOTS { - if target_block.slot > safe_target_block_slot { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + if target_header.slot > safe_target_block_slot { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } else { break; @@ -469,15 +469,15 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // // Walk back until we find a slot that satisfies justifiability rules // relative to the latest finalized checkpoint. 
- while !slot_is_justifiable_after(target_block.slot, store.latest_finalized().slot) { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + while !slot_is_justifiable_after(target_header.slot, store.latest_finalized().slot) { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } Checkpoint { root: target_block_root, - slot: target_block.slot, + slot: target_header.slot, } } @@ -487,7 +487,7 @@ pub fn produce_attestation_data(store: &Store, slot: u64) -> AttestationData { let head_checkpoint = Checkpoint { root: store.head(), slot: store - .get_block(&store.head()) + .get_block_header(&store.head()) .expect("head block exists") .slot, }; @@ -1055,16 +1055,16 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { return false; } - let Some(old_head_block) = store.get_block(&old_head) else { + let Some(old_head_header) = store.get_block_header(&old_head) else { return false; }; - let Some(new_head_block) = store.get_block(&new_head) else { + let Some(new_head_header) = store.get_block_header(&new_head) else { return false; }; - let old_slot = old_head_block.slot; - let new_slot = new_head_block.slot; + let old_slot = old_head_header.slot; + let new_slot = new_head_header.slot; // Determine which head has the higher slot and walk back from it let (mut current_root, target_slot, target_root) = if new_slot >= old_slot { @@ -1074,12 +1074,12 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { }; // Walk back through the chain until we reach the target slot - while let Some(current_block) = store.get_block(¤t_root) { - if current_block.slot <= target_slot { + while let Some(current_header) = store.get_block_header(¤t_root) { + if current_header.slot <= target_slot { // We've reached the target slot - check if we're at the target block return current_root != target_root; } - current_root = current_block.parent_root; + current_root = current_header.parent_root; } // Couldn't walk back far enough (missing blocks in chain) diff --git a/crates/blockchain/tests/forkchoice_spectests.rs b/crates/blockchain/tests/forkchoice_spectests.rs index b35385c..e1c69e1 100644 --- a/crates/blockchain/tests/forkchoice_spectests.rs +++ b/crates/blockchain/tests/forkchoice_spectests.rs @@ -187,13 +187,13 @@ fn validate_checks( // Validate headSlot if let Some(expected_slot) = checks.head_slot { let head_root = st.head(); - let head_block = st - .get_block(&head_root) + let head_header = st + .get_block_header(&head_root) .ok_or_else(|| format!("Step {}: head block not found", step_idx))?; - if head_block.slot != expected_slot { + if head_header.slot != expected_slot { return Err(format!( "Step {}: headSlot mismatch: expected {}, got {}", - step_idx, expected_slot, head_block.slot + step_idx, expected_slot, head_header.slot ) .into()); } diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index d54e050..4cbf2f1 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -205,6 +205,38 @@ pub struct Block { pub body: BlockBody, } +impl Block { + /// Extract the block header, computing the body root. + pub fn header(&self) -> BlockHeader { + BlockHeader { + slot: self.slot, + proposer_index: self.proposer_index, + parent_root: self.parent_root, + state_root: self.state_root, + body_root: self.body.tree_hash_root(), + } + } + + /// Reconstruct a block from header and body. 
+ /// + /// # Panics + /// Panics if the body root doesn't match the header's body_root. + pub fn from_header_and_body(header: BlockHeader, body: BlockBody) -> Self { + debug_assert_eq!( + header.body_root, + body.tree_hash_root(), + "body root mismatch" + ); + Self { + slot: header.slot, + proposer_index: header.proposer_index, + parent_root: header.parent_root, + state_root: header.state_root, + body, + } + } +} + /// The body of a block, containing payload data. /// /// Currently, the main operation is voting. Validators submit attestations which are diff --git a/crates/net/p2p/src/req_resp/handlers.rs b/crates/net/p2p/src/req_resp/handlers.rs index 962a9fe..07c5e74 100644 --- a/crates/net/p2p/src/req_resp/handlers.rs +++ b/crates/net/p2p/src/req_resp/handlers.rs @@ -169,7 +169,7 @@ async fn handle_blocks_by_root_response( pub fn build_status(store: &Store) -> Status { let finalized = store.latest_finalized(); let head_root = store.head(); - let head_slot = store.get_block(&head_root).expect("head block exists").slot; + let head_slot = store.get_block_header(&head_root).expect("head block exists").slot; Status { finalized, head: ethlambda_types::state::Checkpoint { diff --git a/crates/storage/src/api/tables.rs b/crates/storage/src/api/tables.rs index 36ed953..deecdad 100644 --- a/crates/storage/src/api/tables.rs +++ b/crates/storage/src/api/tables.rs @@ -1,8 +1,10 @@ /// Tables in the storage layer. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Table { - /// Block storage: H256 -> Block - Blocks, + /// Block header storage: H256 -> BlockHeader + BlockHeaders, + /// Block body storage: H256 -> BlockBody + BlockBodies, /// Block signatures storage: H256 -> BlockSignaturesWithAttestation /// /// Stored separately from blocks because the genesis block has no signatures. @@ -29,8 +31,9 @@ pub enum Table { } /// All table variants. -pub const ALL_TABLES: [Table; 9] = [ - Table::Blocks, +pub const ALL_TABLES: [Table; 10] = [ + Table::BlockHeaders, + Table::BlockBodies, Table::BlockSignatures, Table::States, Table::LatestKnownAttestations, diff --git a/crates/storage/src/backend/rocksdb.rs b/crates/storage/src/backend/rocksdb.rs index b42053d..c91b2ac 100644 --- a/crates/storage/src/backend/rocksdb.rs +++ b/crates/storage/src/backend/rocksdb.rs @@ -12,7 +12,8 @@ use std::sync::Arc; /// Returns the column family name for a table. 
fn cf_name(table: Table) -> &'static str { match table { - Table::Blocks => "blocks", + Table::BlockHeaders => "block_headers", + Table::BlockBodies => "block_bodies", Table::BlockSignatures => "block_signatures", Table::States => "states", Table::LatestKnownAttestations => "latest_known_attestations", @@ -166,7 +167,7 @@ mod tests { let backend = RocksDBBackend::open(dir.path()).unwrap(); let mut batch = backend.begin_write().unwrap(); batch - .put_batch(Table::Blocks, vec![(b"key1".to_vec(), b"value1".to_vec())]) + .put_batch(Table::BlockHeaders, vec![(b"key1".to_vec(), b"value1".to_vec())]) .unwrap(); batch.commit().unwrap(); } @@ -175,7 +176,7 @@ mod tests { { let backend = RocksDBBackend::open(dir.path()).unwrap(); let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"key1").unwrap(); + let value = view.get(Table::BlockHeaders, b"key1").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } diff --git a/crates/storage/src/backend/tests.rs b/crates/storage/src/backend/tests.rs index 7401577..5a60bb4 100644 --- a/crates/storage/src/backend/tests.rs +++ b/crates/storage/src/backend/tests.rs @@ -28,7 +28,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_get_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -38,7 +38,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { // Read data { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_put_get_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_put_get_key").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } @@ -49,7 +49,7 @@ fn test_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_delete_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -60,7 +60,7 @@ fn test_delete(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_delete_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_delete_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } @@ -68,7 +68,7 @@ fn test_delete(backend: &dyn StorageBackend) { // Verify deleted { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_delete_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_delete_key").unwrap(); assert_eq!(value, None); } } @@ -109,7 +109,7 @@ fn test_prefix_iterator(backend: &dyn StorageBackend) { fn test_nonexistent_key(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); let value = view - .get(Table::Blocks, b"test_nonexistent_key_12345") + .get(Table::BlockHeaders, b"test_nonexistent_key_12345") .unwrap(); assert_eq!(value, None); } @@ -120,7 +120,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"old".to_vec())], ) .unwrap(); @@ -131,11 +131,11 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_del_put_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_del_put_key".to_vec()]) .unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"new".to_vec())], ) .unwrap(); @@ 
-144,7 +144,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_del_put_key").unwrap(), + view.get(Table::BlockHeaders, b"test_del_put_key").unwrap(), Some(b"new".to_vec()) ); } @@ -155,18 +155,18 @@ fn test_put_then_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_del_key".to_vec(), b"value".to_vec())], ) .unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_put_del_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_put_del_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } let view = backend.begin_read().unwrap(); - assert_eq!(view.get(Table::Blocks, b"test_put_del_key").unwrap(), None); + assert_eq!(view.get(Table::BlockHeaders, b"test_put_del_key").unwrap(), None); } fn test_multiple_tables(backend: &dyn StorageBackend) { @@ -175,7 +175,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_multi_key".to_vec(), b"block".to_vec())], ) .unwrap(); @@ -192,7 +192,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_multi_key").unwrap(), + view.get(Table::BlockHeaders, b"test_multi_key").unwrap(), Some(b"block".to_vec()) ); assert_eq!( diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index ed55223..c37e400 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -7,7 +7,7 @@ use crate::types::{StoredAggregatedPayload, StoredSignature}; use ethlambda_types::{ attestation::AttestationData, block::{ - AggregatedSignatureProof, Block, BlockBody, BlockSignaturesWithAttestation, + AggregatedSignatureProof, Block, BlockBody, BlockHeader, BlockSignaturesWithAttestation, BlockWithAttestation, SignedBlockWithAttestation, }, primitives::{ @@ -170,14 +170,22 @@ impl Store { .put_batch(Table::Metadata, metadata_entries) .expect("put metadata"); - // Block and state - let block_entries = vec![( + // Block header and body (stored separately) + let header_entries = vec![( anchor_block_root.as_ssz_bytes(), - anchor_block.as_ssz_bytes(), + anchor_block.header().as_ssz_bytes(), )]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + let body_entries = vec![( + anchor_block_root.as_ssz_bytes(), + anchor_block.body.as_ssz_bytes(), + )]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); let state_entries = vec![( anchor_block_root.as_ssz_bytes(), @@ -454,26 +462,49 @@ impl Store { removed_count } + /// Get the full block by combining header and body. pub fn get_block(&self, root: &H256) -> Option<Block> { let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) + let key = root.as_ssz_bytes(); + + let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; + let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; + + let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); + let body = BlockBody::from_ssz_bytes(&body_bytes).expect("valid body"); + + Some(Block::from_header_and_body(header, body)) + } + + /// Get only the block header without deserializing the body.
+ /// + /// This is more efficient than `get_block` when only header fields are needed. + pub fn get_block_header(&self, root: &H256) -> Option { + let view = self.backend.begin_read().expect("read view"); + view.get(Table::BlockHeaders, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| Block::from_ssz_bytes(&bytes).expect("valid block")) + .map(|bytes| BlockHeader::from_ssz_bytes(&bytes).expect("valid header")) } pub fn contains_block(&self, root: &H256) -> bool { let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) + view.get(Table::BlockHeaders, &root.as_ssz_bytes()) .expect("get") .is_some() } pub fn insert_block(&mut self, root: H256, block: Block) { let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; + + let header_entries = vec![(root.as_ssz_bytes(), block.header().as_ssz_bytes())]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + let body_entries = vec![(root.as_ssz_bytes(), block.body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); let index_entries = vec![( encode_live_chain_key(block.slot, &root), @@ -513,10 +544,15 @@ impl Store { let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; + let header_entries = vec![(root.as_ssz_bytes(), block.header().as_ssz_bytes())]; + batch + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + let body_entries = vec![(root.as_ssz_bytes(), block.body.as_ssz_bytes())]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); let sig_entries = vec![(root.as_ssz_bytes(), signatures.as_ssz_bytes())]; batch @@ -534,18 +570,21 @@ impl Store { batch.commit().expect("commit"); } - /// Get a signed block by combining block and signatures. + /// Get a signed block by combining header, body, and signatures. /// - /// Returns None if either the block or signatures are not found. + /// Returns None if any of the components are not found. /// Note: Genesis block has no entry in BlockSignatures table. 
pub fn get_signed_block(&self, root: &H256) -> Option { let view = self.backend.begin_read().expect("read view"); let key = root.as_ssz_bytes(); - let block_bytes = view.get(Table::Blocks, &key).expect("get")?; + let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; + let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; let sig_bytes = view.get(Table::BlockSignatures, &key).expect("get")?; - let block = Block::from_ssz_bytes(&block_bytes).expect("valid block"); + let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); + let body = BlockBody::from_ssz_bytes(&body_bytes).expect("valid body"); + let block = Block::from_header_and_body(header, body); let signatures = BlockSignaturesWithAttestation::from_ssz_bytes(&sig_bytes).expect("valid signatures"); From 6aac3e21c7172342688e0fa26db5840d12c56117 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 15:21:30 -0300 Subject: [PATCH 11/20] refactor: remove unused get_block --- crates/storage/src/store.rs | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index c37e400..cfc0c6a 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -462,23 +462,7 @@ impl Store { removed_count } - /// Get the full block by combining header and body. - pub fn get_block(&self, root: &H256) -> Option { - let view = self.backend.begin_read().expect("read view"); - let key = root.as_ssz_bytes(); - - let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; - let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; - - let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); - let body = BlockBody::from_ssz_bytes(&body_bytes).expect("valid body"); - - Some(Block::from_header_and_body(header, body)) - } - - /// Get only the block header without deserializing the body. - /// - /// This is more efficient than `get_block` when only header fields are needed. + /// Get the block header by root. pub fn get_block_header(&self, root: &H256) -> Option { let view = self.backend.begin_read().expect("read view"); view.get(Table::BlockHeaders, &root.as_ssz_bytes()) @@ -860,7 +844,7 @@ impl Store { /// Returns the slot of the current safe target block. 
pub fn safe_target_slot(&self) -> u64 { - self.get_block(&self.safe_target()) + self.get_block_header(&self.safe_target()) .expect("safe target exists") .slot } From 715cd22d5a65ec816c130290a8a03243a1e7af7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:00:46 -0300 Subject: [PATCH 12/20] refactor: remove usages of contains_block --- crates/blockchain/src/store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index 7e758d8..d058119 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -98,9 +98,9 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), .get_block_header(&data.target.root) .ok_or(StoreError::UnknownTargetBlock(data.target.root))?; - if !store.contains_block(&data.head.root) { - return Err(StoreError::UnknownHeadBlock(data.head.root)); - } + let _ = store + .get_block_header(&data.head.root) + .ok_or(StoreError::UnknownHeadBlock(data.head.root))?; // Topology Check - Source must be older than Target. if data.source.slot > data.target.slot { From 7d5f47b22102e571b1acdb839bfcbf278cb0258a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:03:55 -0300 Subject: [PATCH 13/20] refactor: replace contains_block with has_state and remove unused functions --- crates/blockchain/src/lib.rs | 4 +-- crates/blockchain/src/store.rs | 2 +- crates/storage/src/store.rs | 66 ++++------------------------------ 3 files changed, 9 insertions(+), 63 deletions(-) diff --git a/crates/blockchain/src/lib.rs b/crates/blockchain/src/lib.rs index 8f58b39..510de5a 100644 --- a/crates/blockchain/src/lib.rs +++ b/crates/blockchain/src/lib.rs @@ -279,8 +279,8 @@ impl BlockChainServer { let parent_root = signed_block.message.block.parent_root; let proposer = signed_block.message.block.proposer_index; - // Check if parent block exists before attempting to process - if !self.store.contains_block(&parent_root) { + // Check if parent state exists before attempting to process + if !self.store.has_state(&parent_root) { info!(%slot, %parent_root, %block_root, "Block parent missing, storing as pending"); // Store block for later processing diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index d058119..75460f6 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -325,7 +325,7 @@ pub fn on_block( let slot = block.slot; // Skip duplicate blocks (idempotent operation) - if store.contains_block(&block_root) { + if store.has_state(&block_root) { return Ok(()); } diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index cfc0c6a..d0b07a0 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -470,37 +470,6 @@ impl Store { .map(|bytes| BlockHeader::from_ssz_bytes(&bytes).expect("valid header")) } - pub fn contains_block(&self, root: &H256) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::BlockHeaders, &root.as_ssz_bytes()) - .expect("get") - .is_some() - } - - pub fn insert_block(&mut self, root: H256, block: Block) { - let mut batch = self.backend.begin_write().expect("write batch"); - - let header_entries = vec![(root.as_ssz_bytes(), block.header().as_ssz_bytes())]; - batch - .put_batch(Table::BlockHeaders, header_entries) - .expect("put block header"); - - let 
body_entries = vec![(root.as_ssz_bytes(), block.body.as_ssz_bytes())]; - batch - .put_batch(Table::BlockBodies, body_entries) - .expect("put block body"); - - let index_entries = vec![( - encode_live_chain_key(block.slot, &root), - block.parent_root.as_ssz_bytes(), - )]; - batch - .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); - - batch.commit().expect("commit"); - } - // ============ Signed Blocks ============ /// Insert a signed block, storing the block and signatures separately. @@ -577,27 +546,18 @@ impl Store { // ============ States ============ - /// Iterate over all (root, state) pairs. - pub fn iter_states(&self) -> impl Iterator + '_ { + pub fn get_state(&self, root: &H256) -> Option { let view = self.backend.begin_read().expect("read view"); - let entries: Vec<_> = view - .prefix_iterator(Table::States, &[]) - .expect("iterator") - .filter_map(|res| res.ok()) - .map(|(k, v)| { - let root = H256::from_ssz_bytes(&k).expect("valid root"); - let state = State::from_ssz_bytes(&v).expect("valid state"); - (root, state) - }) - .collect(); - entries.into_iter() + view.get(Table::States, &root.as_ssz_bytes()) + .expect("get") + .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) } - pub fn get_state(&self, root: &H256) -> Option { + pub fn has_state(&self, root: &H256) -> bool { let view = self.backend.begin_read().expect("read view"); view.get(Table::States, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) + .is_some() } pub fn insert_state(&mut self, root: H256, state: State) { @@ -738,20 +698,6 @@ impl Store { entries.into_iter() } - pub fn get_gossip_signature(&self, key: &SignatureKey) -> Option { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .and_then(|bytes| StoredSignature::from_ssz_bytes(&bytes).ok()) - } - - pub fn contains_gossip_signature(&self, key: &SignatureKey) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .is_some() - } - pub fn insert_gossip_signature( &mut self, attestation_data: &AttestationData, From c9afc2ba8ab796a58d252b11464aa40d749ade17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:11:43 -0300 Subject: [PATCH 14/20] feat: add constructor to init Store from a State only --- crates/storage/src/store.rs | 50 ++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index d0b07a0..9ca6f5b 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -138,13 +138,32 @@ impl Store { backend: Arc, anchor_state: State, anchor_block: Block, + ) -> Self { + Self::init_store(backend, anchor_state, anchor_block.header(), Some(anchor_block.body)) + } + + /// Initialize a Store from an anchor state only. + /// + /// Uses the state's `latest_block_header` as the anchor block header. + /// No block body is stored since it's not available. + pub fn from_anchor_state(backend: Arc, anchor_state: State) -> Self { + let header = anchor_state.latest_block_header.clone(); + Self::init_store(backend, anchor_state, header, None) + } + + /// Internal helper to initialize the store with anchor data. 
+ fn init_store( + backend: Arc, + anchor_state: State, + anchor_header: BlockHeader, + anchor_body: Option, ) -> Self { let anchor_state_root = anchor_state.tree_hash_root(); - let anchor_block_root = anchor_block.tree_hash_root(); + let anchor_block_root = anchor_header.tree_hash_root(); let anchor_checkpoint = Checkpoint { root: anchor_block_root, - slot: anchor_block.slot, + slot: anchor_header.slot, }; // Insert initial data @@ -170,23 +189,24 @@ impl Store { .put_batch(Table::Metadata, metadata_entries) .expect("put metadata"); - // Block header and body (stored separately) + // Block header let header_entries = vec![( anchor_block_root.as_ssz_bytes(), - anchor_block.header().as_ssz_bytes(), + anchor_header.as_ssz_bytes(), )]; batch .put_batch(Table::BlockHeaders, header_entries) .expect("put block header"); - let body_entries = vec![( - anchor_block_root.as_ssz_bytes(), - anchor_block.body.as_ssz_bytes(), - )]; - batch - .put_batch(Table::BlockBodies, body_entries) - .expect("put block body"); + // Block body (if provided) + if let Some(body) = anchor_body { + let body_entries = vec![(anchor_block_root.as_ssz_bytes(), body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } + // State let state_entries = vec![( anchor_block_root.as_ssz_bytes(), anchor_state.as_ssz_bytes(), @@ -195,14 +215,14 @@ impl Store { .put_batch(Table::States, state_entries) .expect("put state"); - // Non-finalized chain index + // Live chain index let index_entries = vec![( - encode_live_chain_key(anchor_block.slot, &anchor_block_root), - anchor_block.parent_root.as_ssz_bytes(), + encode_live_chain_key(anchor_header.slot, &anchor_block_root), + anchor_header.parent_root.as_ssz_bytes(), )]; batch .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); + .expect("put live chain index"); batch.commit().expect("commit"); } From eefc463a5a343e69ce531c1ba5d9d05fb68f53da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:27:12 -0300 Subject: [PATCH 15/20] refactor: add additional checks and simplify init_store --- crates/common/types/src/block.rs | 2 +- crates/storage/src/store.rs | 70 +++++++++++++++++++++----------- 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index 4cbf2f1..f2b734d 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -176,7 +176,7 @@ impl BlockSignaturesWithAttestation { /// /// Headers are smaller than full blocks. They're useful for tracking the chain /// without storing everything. -#[derive(Debug, Clone, Serialize, Encode, Decode, TreeHash)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Encode, Decode, TreeHash)] pub struct BlockHeader { /// The slot in which the block was proposed pub slot: u64, diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index 9ca6f5b..aeda818 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -118,28 +118,35 @@ pub struct Store { impl Store { /// Initialize a Store from a genesis state. 
- pub fn from_genesis(backend: Arc, mut genesis_state: State) -> Self { - // Ensure the header state root is zero before computing the state root - genesis_state.latest_block_header.state_root = H256::ZERO; - - let genesis_state_root = genesis_state.tree_hash_root(); - let genesis_block = Block { - slot: 0, - proposer_index: 0, - parent_root: H256::ZERO, - state_root: genesis_state_root, - body: BlockBody::default(), - }; - Self::get_forkchoice_store(backend, genesis_state, genesis_block) + pub fn from_genesis(backend: Arc, genesis_state: State) -> Self { + Self::from_anchor_state(backend, genesis_state) } /// Initialize a Store from an anchor state and block. + /// + /// The block must match the state's `latest_block_header`. + /// + /// # Panics + /// + /// Panics if the block's header doesn't match the state's `latest_block_header` + /// (comparing all fields except `state_root`, which is computed internally). pub fn get_forkchoice_store( backend: Arc, anchor_state: State, anchor_block: Block, ) -> Self { - Self::init_store(backend, anchor_state, anchor_block.header(), Some(anchor_block.body)) + // Compare headers with state_root zeroed (init_store handles state_root separately) + let mut state_header = anchor_state.latest_block_header.clone(); + let mut block_header = anchor_block.header(); + state_header.state_root = H256::ZERO; + block_header.state_root = H256::ZERO; + + assert_eq!( + state_header, block_header, + "block header doesn't match state's latest_block_header" + ); + + Self::init_store(backend, anchor_state, Some(anchor_block.body)) } /// Initialize a Store from an anchor state only. @@ -147,23 +154,40 @@ impl Store { /// Uses the state's `latest_block_header` as the anchor block header. /// No block body is stored since it's not available. pub fn from_anchor_state(backend: Arc, anchor_state: State) -> Self { - let header = anchor_state.latest_block_header.clone(); - Self::init_store(backend, anchor_state, header, None) + Self::init_store(backend, anchor_state, None) } /// Internal helper to initialize the store with anchor data. + /// + /// Header is taken from `anchor_state.latest_block_header`. 
fn init_store( backend: Arc, - anchor_state: State, - anchor_header: BlockHeader, + mut anchor_state: State, anchor_body: Option, ) -> Self { + // Save original state_root for validation + let original_state_root = anchor_state.latest_block_header.state_root; + + // Zero out state_root before computing (state contains header, header contains state_root) + anchor_state.latest_block_header.state_root = H256::ZERO; + + // Compute state root with zeroed header let anchor_state_root = anchor_state.tree_hash_root(); - let anchor_block_root = anchor_header.tree_hash_root(); + + // Validate: original must be zero (genesis) or match computed (checkpoint sync) + assert!( + original_state_root == H256::ZERO || original_state_root == anchor_state_root, + "anchor header state_root mismatch: expected {anchor_state_root:?}, got {original_state_root:?}" + ); + + // Populate the correct state_root + anchor_state.latest_block_header.state_root = anchor_state_root; + + let anchor_block_root = anchor_state.latest_block_header.tree_hash_root(); let anchor_checkpoint = Checkpoint { root: anchor_block_root, - slot: anchor_header.slot, + slot: anchor_state.latest_block_header.slot, }; // Insert initial data @@ -192,7 +216,7 @@ impl Store { // Block header let header_entries = vec![( anchor_block_root.as_ssz_bytes(), - anchor_header.as_ssz_bytes(), + anchor_state.latest_block_header.as_ssz_bytes(), )]; batch .put_batch(Table::BlockHeaders, header_entries) @@ -217,8 +241,8 @@ impl Store { // Live chain index let index_entries = vec![( - encode_live_chain_key(anchor_header.slot, &anchor_block_root), - anchor_header.parent_root.as_ssz_bytes(), + encode_live_chain_key(anchor_state.latest_block_header.slot, &anchor_block_root), + anchor_state.latest_block_header.parent_root.as_ssz_bytes(), )]; batch .put_batch(Table::LiveChain, index_entries) From 1ea56ef454783ba395a50814f75f0acaf3aa3e81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:29:16 -0300 Subject: [PATCH 16/20] refactor: remove from_genesis --- bin/ethlambda/src/main.rs | 2 +- crates/net/rpc/src/lib.rs | 4 ++-- crates/storage/src/store.rs | 17 ++++++----------- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index aa438dc..b399a74 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -95,7 +95,7 @@ async fn main() { let genesis_state = State::from_genesis(&genesis, validators); let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = Store::from_genesis(backend, genesis_state); + let store = Store::from_anchor_state(backend, genesis_state); let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); diff --git a/crates/net/rpc/src/lib.rs b/crates/net/rpc/src/lib.rs index fb5100c..e1f2b9e 100644 --- a/crates/net/rpc/src/lib.rs +++ b/crates/net/rpc/src/lib.rs @@ -118,7 +118,7 @@ mod tests { async fn test_get_latest_justified_checkpoint() { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = Store::from_anchor_state(backend, state); let app = build_api_router(store.clone()); @@ -154,7 +154,7 @@ mod tests { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = 
Store::from_anchor_state(backend, state); // Get the expected state from the store let finalized = store.latest_finalized(); diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index aeda818..9d75e09 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -117,9 +117,12 @@ pub struct Store { } impl Store { - /// Initialize a Store from a genesis state. - pub fn from_genesis(backend: Arc, genesis_state: State) -> Self { - Self::from_anchor_state(backend, genesis_state) + /// Initialize a Store from an anchor state only. + /// + /// Uses the state's `latest_block_header` as the anchor block header. + /// No block body is stored since it's not available. + pub fn from_anchor_state(backend: Arc, anchor_state: State) -> Self { + Self::init_store(backend, anchor_state, None) } /// Initialize a Store from an anchor state and block. @@ -149,14 +152,6 @@ impl Store { Self::init_store(backend, anchor_state, Some(anchor_block.body)) } - /// Initialize a Store from an anchor state only. - /// - /// Uses the state's `latest_block_header` as the anchor block header. - /// No block body is stored since it's not available. - pub fn from_anchor_state(backend: Arc, anchor_state: State) -> Self { - Self::init_store(backend, anchor_state, None) - } - /// Internal helper to initialize the store with anchor data. /// /// Header is taken from `anchor_state.latest_block_header`. From 4664a1c7234b3f53f619e539da3d772cfbaf1ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:47:14 -0300 Subject: [PATCH 17/20] docs: update Store documentation --- crates/storage/src/store.rs | 67 ++++++++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 15 deletions(-) diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index 9d75e09..d1a586c 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -105,14 +105,23 @@ fn decode_live_chain_key(bytes: &[u8]) -> (u64, H256) { (slot, root) } -/// Underlying storage of the node. -/// Similar to the spec's `Store`, but backed by a pluggable storage backend. +/// Fork choice store backed by a pluggable storage backend. /// -/// All data is stored in the backend. Metadata fields (time, config, head, etc.) -/// are stored in the Metadata table with their field name as the key. +/// The Store maintains all state required for fork choice and block processing: +/// +/// - **Metadata**: time, config, head, safe_target, justified/finalized checkpoints +/// - **Blocks**: headers and bodies stored separately for efficient header-only queries +/// - **States**: beacon states indexed by block root +/// - **Attestations**: latest known and pending ("new") attestations per validator +/// - **Signatures**: gossip signatures and aggregated proofs for signature verification +/// - **LiveChain**: slot index for efficient fork choice traversal (pruned on finalization) +/// +/// # Constructors +/// +/// - [`from_anchor_state`](Self::from_anchor_state): Initialize from a checkpoint state (no block body) +/// - [`get_forkchoice_store`](Self::get_forkchoice_store): Initialize from state + block (stores body) #[derive(Clone)] pub struct Store { - /// Storage backend for all store data. backend: Arc, } @@ -272,44 +281,50 @@ impl Store { // ============ Time ============ + /// Returns the current store time in seconds since genesis. pub fn time(&self) -> u64 { self.get_metadata(KEY_TIME) } + /// Sets the current store time. 
pub fn set_time(&mut self, time: u64) { self.set_metadata(KEY_TIME, &time); } // ============ Config ============ + /// Returns the chain configuration. pub fn config(&self) -> ChainConfig { self.get_metadata(KEY_CONFIG) } // ============ Head ============ + /// Returns the current head block root. pub fn head(&self) -> H256 { self.get_metadata(KEY_HEAD) } // ============ Safe Target ============ + /// Returns the safe target block root for attestations. pub fn safe_target(&self) -> H256 { self.get_metadata(KEY_SAFE_TARGET) } + /// Sets the safe target block root. pub fn set_safe_target(&mut self, safe_target: H256) { self.set_metadata(KEY_SAFE_TARGET, &safe_target); } - // ============ Latest Justified ============ + // ============ Checkpoints ============ + /// Returns the latest justified checkpoint. pub fn latest_justified(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_JUSTIFIED) } - // ============ Latest Finalized ============ - + /// Returns the latest finalized checkpoint. pub fn latest_finalized(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_FINALIZED) } @@ -585,6 +600,7 @@ impl Store { // ============ States ============ + /// Returns the state for the given block root. pub fn get_state(&self, root: &H256) -> Option { let view = self.backend.begin_read().expect("read view"); view.get(Table::States, &root.as_ssz_bytes()) @@ -592,6 +608,7 @@ impl Store { .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) } + /// Returns whether a state exists for the given block root. pub fn has_state(&self, root: &H256) -> bool { let view = self.backend.begin_read().expect("read view"); view.get(Table::States, &root.as_ssz_bytes()) @@ -599,6 +616,7 @@ impl Store { .is_some() } + /// Stores a state indexed by block root. pub fn insert_state(&mut self, root: H256, state: State) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(root.as_ssz_bytes(), state.as_ssz_bytes())]; @@ -606,9 +624,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest Known Attestations ============ + // ============ Known Attestations ============ + // + // "Known" attestations are included in fork choice weight calculations. + // They're promoted from "new" attestations at specific intervals. - /// Iterate over all (validator_id, attestation_data) pairs for known attestations. + /// Iterates over all known attestations (validator_id, attestation_data). pub fn iter_known_attestations(&self) -> impl Iterator + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -624,6 +645,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest known attestation. pub fn get_known_attestation(&self, validator_id: &u64) -> Option { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestKnownAttestations, &validator_id.as_ssz_bytes()) @@ -631,6 +653,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's latest known attestation. 
pub fn insert_known_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -640,9 +663,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest New Attestations ============ + // ============ New Attestations ============ + // + // "New" attestations are pending attestations not yet included in fork choice. + // They're promoted to "known" via `promote_new_attestations`. - /// Iterate over all (validator_id, attestation_data) pairs for new attestations. + /// Iterates over all new (pending) attestations. pub fn iter_new_attestations(&self) -> impl Iterator + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -658,6 +684,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest new (pending) attestation. pub fn get_new_attestation(&self, validator_id: &u64) -> Option { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestNewAttestations, &validator_id.as_ssz_bytes()) @@ -665,6 +692,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's new (pending) attestation. pub fn insert_new_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -674,6 +702,7 @@ impl Store { batch.commit().expect("commit"); } + /// Removes a validator's new (pending) attestation. pub fn remove_new_attestation(&mut self, validator_id: &u64) { let mut batch = self.backend.begin_write().expect("write batch"); batch @@ -717,8 +746,11 @@ impl Store { } // ============ Gossip Signatures ============ + // + // Gossip signatures are individual validator signatures received via P2P. + // They're aggregated into proofs for block signature verification. - /// Iterate over all (signature_key, stored_signature) pairs. + /// Iterates over all gossip signatures. pub fn iter_gossip_signatures( &self, ) -> impl Iterator + '_ { @@ -737,6 +769,7 @@ impl Store { entries.into_iter() } + /// Stores a gossip signature for later aggregation. pub fn insert_gossip_signature( &mut self, attestation_data: &AttestationData, @@ -757,8 +790,11 @@ impl Store { } // ============ Aggregated Payloads ============ + // + // Aggregated payloads are leanVM proofs combining multiple signatures. + // Used to verify block signatures efficiently. - /// Iterate over all (signature_key, stored_payloads) pairs. + /// Iterates over all aggregated signature payloads. pub fn iter_aggregated_payloads( &self, ) -> impl Iterator)> + '_ { @@ -777,7 +813,8 @@ impl Store { entries.into_iter() } - pub fn get_aggregated_payloads( + /// Returns aggregated payloads for a signature key. 
+ fn get_aggregated_payloads( &self, key: &SignatureKey, ) -> Option> { From d3f2e908997f2480c36747d0e0cc027a13a43178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 16:48:15 -0300 Subject: [PATCH 18/20] chore: fmt --- crates/blockchain/src/store.rs | 10 ++++++++-- crates/net/p2p/src/req_resp/handlers.rs | 5 ++++- crates/storage/src/backend/rocksdb.rs | 5 ++++- crates/storage/src/backend/tests.rs | 5 ++++- crates/storage/src/store.rs | 5 +---- 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index 75460f6..4cc787e 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -46,8 +46,14 @@ fn update_head(store: &mut Store) { store.update_checkpoints(ForkCheckpoints::head_only(new_head)); if old_head != new_head { - let old_slot = store.get_block_header(&old_head).map(|h| h.slot).unwrap_or(0); - let new_slot = store.get_block_header(&new_head).map(|h| h.slot).unwrap_or(0); + let old_slot = store + .get_block_header(&old_head) + .map(|h| h.slot) + .unwrap_or(0); + let new_slot = store + .get_block_header(&new_head) + .map(|h| h.slot) + .unwrap_or(0); let justified_slot = store.latest_justified().slot; let finalized_slot = store.latest_finalized().slot; info!( diff --git a/crates/net/p2p/src/req_resp/handlers.rs b/crates/net/p2p/src/req_resp/handlers.rs index 07c5e74..a11e06d 100644 --- a/crates/net/p2p/src/req_resp/handlers.rs +++ b/crates/net/p2p/src/req_resp/handlers.rs @@ -169,7 +169,10 @@ async fn handle_blocks_by_root_response( pub fn build_status(store: &Store) -> Status { let finalized = store.latest_finalized(); let head_root = store.head(); - let head_slot = store.get_block_header(&head_root).expect("head block exists").slot; + let head_slot = store + .get_block_header(&head_root) + .expect("head block exists") + .slot; Status { finalized, head: ethlambda_types::state::Checkpoint { diff --git a/crates/storage/src/backend/rocksdb.rs b/crates/storage/src/backend/rocksdb.rs index c91b2ac..c25b128 100644 --- a/crates/storage/src/backend/rocksdb.rs +++ b/crates/storage/src/backend/rocksdb.rs @@ -167,7 +167,10 @@ mod tests { let backend = RocksDBBackend::open(dir.path()).unwrap(); let mut batch = backend.begin_write().unwrap(); batch - .put_batch(Table::BlockHeaders, vec![(b"key1".to_vec(), b"value1".to_vec())]) + .put_batch( + Table::BlockHeaders, + vec![(b"key1".to_vec(), b"value1".to_vec())], + ) .unwrap(); batch.commit().unwrap(); } diff --git a/crates/storage/src/backend/tests.rs b/crates/storage/src/backend/tests.rs index 5a60bb4..f1a96b0 100644 --- a/crates/storage/src/backend/tests.rs +++ b/crates/storage/src/backend/tests.rs @@ -166,7 +166,10 @@ fn test_put_then_delete(backend: &dyn StorageBackend) { } let view = backend.begin_read().unwrap(); - assert_eq!(view.get(Table::BlockHeaders, b"test_put_del_key").unwrap(), None); + assert_eq!( + view.get(Table::BlockHeaders, b"test_put_del_key").unwrap(), + None + ); } fn test_multiple_tables(backend: &dyn StorageBackend) { diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index d1a586c..415e653 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -814,10 +814,7 @@ impl Store { } /// Returns aggregated payloads for a signature key. 
- fn get_aggregated_payloads( - &self, - key: &SignatureKey, - ) -> Option> { + fn get_aggregated_payloads(&self, key: &SignatureKey) -> Option> { let view = self.backend.begin_read().expect("read view"); view.get(Table::AggregatedPayloads, &encode_signature_key(key)) .expect("get") From 4fdf5ec3e4c109bf1d3625be295699ab19b4c3bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:12:45 -0300 Subject: [PATCH 19/20] refactor: avoid storing empty bodies --- crates/common/types/src/block.rs | 4 ++-- crates/storage/src/store.rs | 34 ++++++++++++++++++++++++-------- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index f2b734d..776dbb1 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -219,8 +219,8 @@ impl Block { /// Reconstruct a block from header and body. /// - /// # Panics - /// Panics if the body root doesn't match the header's body_root. + /// The caller should ensure that `header.body_root` matches `body.tree_hash_root()`. + /// This is verified with a debug assertion but not in release builds. pub fn from_header_and_body(header: BlockHeader, body: BlockBody) -> Self { debug_assert_eq!( header.body_root, diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index 415e653..b6ef9ca 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -1,5 +1,11 @@ use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; + +/// The tree hash root of an empty block body. +/// +/// Used to detect genesis/anchor blocks that have no attestations, +/// allowing us to skip storing empty bodies and reconstruct them on read. +static EMPTY_BODY_ROOT: LazyLock = LazyLock::new(|| BlockBody::default().tree_hash_root()); use crate::api::{StorageBackend, Table}; use crate::types::{StoredAggregatedPayload, StoredSignature}; @@ -137,6 +143,7 @@ impl Store { /// Initialize a Store from an anchor state and block. /// /// The block must match the state's `latest_block_header`. + /// Named to mirror the spec's `get_forkchoice_store` function. 
/// /// # Panics /// @@ -551,15 +558,19 @@ impl Store { let mut batch = self.backend.begin_write().expect("write batch"); - let header_entries = vec![(root.as_ssz_bytes(), block.header().as_ssz_bytes())]; + let header = block.header(); + let header_entries = vec![(root.as_ssz_bytes(), header.as_ssz_bytes())]; batch .put_batch(Table::BlockHeaders, header_entries) .expect("put block header"); - let body_entries = vec![(root.as_ssz_bytes(), block.body.as_ssz_bytes())]; - batch - .put_batch(Table::BlockBodies, body_entries) - .expect("put block body"); + // Skip storing empty bodies - they can be reconstructed from the header's body_root + if header.body_root != *EMPTY_BODY_ROOT { + let body_entries = vec![(root.as_ssz_bytes(), block.body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } let sig_entries = vec![(root.as_ssz_bytes(), signatures.as_ssz_bytes())]; batch @@ -586,11 +597,18 @@ impl Store { let key = root.as_ssz_bytes(); let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; - let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; let sig_bytes = view.get(Table::BlockSignatures, &key).expect("get")?; let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); - let body = BlockBody::from_ssz_bytes(&body_bytes).expect("valid body"); + + // Use empty body if header indicates empty, otherwise fetch from DB + let body = if header.body_root == *EMPTY_BODY_ROOT { + BlockBody::default() + } else { + let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; + BlockBody::from_ssz_bytes(&body_bytes).expect("valid body") + }; + let block = Block::from_header_and_body(header, body); let signatures = BlockSignaturesWithAttestation::from_ssz_bytes(&sig_bytes).expect("valid signatures"); From 460575a17e46fe0915e16978cb36f921ffd2b9c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Gr=C3=BCner?= <47506558+MegaRedHand@users.noreply.github.com> Date: Thu, 5 Feb 2026 18:36:22 -0300 Subject: [PATCH 20/20] refactor: simplify a bit the code --- bin/ethlambda/src/checkpoint_sync.rs | 103 ++++++++++++++------------- bin/ethlambda/src/main.rs | 67 ++++++++--------- 2 files changed, 81 insertions(+), 89 deletions(-) diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs index fb6eeb8..20b3a7f 100644 --- a/bin/ethlambda/src/checkpoint_sync.rs +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -1,8 +1,7 @@ use std::time::Duration; -use ethlambda_types::block::{Block, BlockBody}; use ethlambda_types::primitives::ssz::{Decode, DecodeError, TreeHash}; -use ethlambda_types::state::{State, Validator}; +use ethlambda_types::state::State; use reqwest::Client; /// Timeout for establishing the HTTP connection to the checkpoint peer. @@ -89,13 +88,14 @@ pub async fn fetch_checkpoint_state(base_url: &str) -> Result Result<(), CheckpointSyncError> { + let expected_genesis_time: u64 = genesis_state.config.genesis_time; + let expected_validators = &genesis_state.validators; + // Slot sanity check if state.slot == 0 { return Err(CheckpointSyncError::SlotIsZero); @@ -185,28 +185,12 @@ pub fn verify_checkpoint_state( Ok(()) } -/// Construct anchor block from checkpoint state. -/// -/// IMPORTANT: This creates a block with default body. The block's tree_hash_root() -/// will only match the original block if the original also had an empty body. -/// For most checkpoint states, this is acceptable because fork choice uses the -/// anchor checkpoint, not individual block lookups. 
-pub fn construct_anchor_block(state: &State) -> Block { - Block { - slot: state.latest_block_header.slot, - parent_root: state.latest_block_header.parent_root, - proposer_index: state.latest_block_header.proposer_index, - state_root: state.latest_block_header.state_root, - body: BlockBody::default(), - } -} - #[cfg(test)] mod tests { use super::*; use ethlambda_types::block::BlockHeader; use ethlambda_types::primitives::VariableList; - use ethlambda_types::state::{ChainConfig, Checkpoint}; + use ethlambda_types::state::{ChainConfig, Checkpoint, Validator}; // Helper to create valid test state fn create_test_state(slot: u64, validators: Vec, genesis_time: u64) -> State { @@ -239,6 +223,11 @@ mod tests { } } + /// Create a genesis state to use as reference for verification. + fn create_genesis_state(validators: Vec, genesis_time: u64) -> State { + create_test_state(0, validators, genesis_time) + } + fn create_test_validator() -> Validator { Validator { pubkey: [1u8; 52], @@ -266,69 +255,74 @@ mod tests { fn verify_accepts_valid_state() { let validators = vec![create_test_validator()]; let state = create_test_state(100, validators.clone(), 1000); - assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); } #[test] fn verify_rejects_slot_zero() { let validators = vec![create_test_validator()]; let state = create_test_state(0, validators.clone(), 1000); - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_rejects_empty_validators() { let state = create_test_state(100, vec![], 1000); - assert!(verify_checkpoint_state(&state, 1000, &[]).is_err()); + let genesis = create_genesis_state(vec![], 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_rejects_genesis_time_mismatch() { let validators = vec![create_test_validator()]; let state = create_test_state(100, validators.clone(), 1000); - // State has genesis_time=1000, we pass expected=9999 - assert!(verify_checkpoint_state(&state, 9999, &validators).is_err()); + // State has genesis_time=1000, genesis has 9999 + let genesis = create_genesis_state(validators, 9999); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_rejects_validator_count_mismatch() { let validators = vec![create_test_validator()]; - let state = create_test_state(100, validators.clone(), 1000); - let extra_validators = create_validators_with_indices(2); - assert!(verify_checkpoint_state(&state, 1000, &extra_validators).is_err()); + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(2), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_accepts_multiple_validators_with_sequential_indices() { let validators = create_validators_with_indices(3); let state = create_test_state(100, validators.clone(), 1000); - assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); } #[test] fn verify_rejects_non_sequential_validator_indices() { let mut validators = create_validators_with_indices(3); validators[1].index = 5; // Wrong index at position 1 - let state = create_test_state(100, validators.clone(), 
1000); - let expected_validators = create_validators_with_indices(3); - assert!(verify_checkpoint_state(&state, 1000, &expected_validators).is_err()); + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(3), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_rejects_duplicate_validator_indices() { let mut validators = create_validators_with_indices(3); validators[2].index = 0; // Duplicate index - let state = create_test_state(100, validators.clone(), 1000); - let expected_validators = create_validators_with_indices(3); - assert!(verify_checkpoint_state(&state, 1000, &expected_validators).is_err()); + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(3), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] fn verify_rejects_validator_pubkey_mismatch() { let validators = vec![create_test_validator()]; - let state = create_test_state(100, validators.clone(), 1000); - let different_validators = vec![create_different_validator()]; - assert!(verify_checkpoint_state(&state, 1000, &different_validators).is_err()); + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(vec![create_different_validator()], 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -336,7 +330,8 @@ mod tests { let validators = vec![create_test_validator()]; let mut state = create_test_state(100, validators.clone(), 1000); state.latest_finalized.slot = 101; // Finalized after state slot - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -345,7 +340,8 @@ mod tests { let mut state = create_test_state(100, validators.clone(), 1000); state.latest_finalized.slot = 50; state.latest_justified.slot = 40; // Justified before finalized - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -358,7 +354,8 @@ mod tests { state.latest_finalized.root = common_root; state.latest_justified.slot = 50; // Same slot state.latest_justified.root = common_root; // Same root - assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); } #[test] @@ -370,7 +367,8 @@ mod tests { state.latest_finalized.root = H256::from([1u8; 32]); state.latest_justified.slot = 50; // Same slot state.latest_justified.root = H256::from([2u8; 32]); // Different root - conflict! 
- assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -378,7 +376,8 @@ mod tests { let validators = vec![create_test_validator()]; let mut state = create_test_state(100, validators.clone(), 1000); state.latest_block_header.slot = 101; // Block header slot exceeds state slot - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -389,7 +388,8 @@ mod tests { let block_root = state.latest_block_header.tree_hash_root(); state.latest_finalized.slot = 50; state.latest_finalized.root = block_root; - assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); } #[test] @@ -400,7 +400,8 @@ mod tests { state.latest_block_header.slot = 50; state.latest_finalized.slot = 50; state.latest_finalized.root = H256::from([99u8; 32]); // Wrong root - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } #[test] @@ -411,7 +412,8 @@ mod tests { let block_root = state.latest_block_header.tree_hash_root(); state.latest_justified.slot = 90; state.latest_justified.root = block_root; - assert!(verify_checkpoint_state(&state, 1000, &validators).is_ok()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); } #[test] @@ -422,6 +424,7 @@ mod tests { state.latest_block_header.slot = 90; state.latest_justified.slot = 90; state.latest_justified.root = H256::from([99u8; 32]); // Wrong root - assert!(verify_checkpoint_state(&state, 1000, &validators).is_err()); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); } } diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index 2d81d6e..90acbcc 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -21,7 +21,7 @@ use tracing::{error, info}; use tracing_subscriber::{EnvFilter, Layer, Registry, layer::SubscriberExt}; use ethlambda_blockchain::BlockChain; -use ethlambda_storage::{StorageBackend, Store, backend::RocksDBBackend}; +use ethlambda_storage::{Store, backend::RocksDBBackend}; const ASCII_ART: &str = r#" _ _ _ _ _ @@ -100,15 +100,15 @@ async fn main() { let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = fetch_initial_state( - options.checkpoint_sync_url.as_deref(), - &genesis, - validators, - backend.clone(), - ) - .await - .inspect_err(|err| error!(%err, "Failed to initialize state")) - .unwrap_or_else(|_| std::process::exit(1)); + let mut state = State::from_genesis(&genesis, validators); + + if let Some(checkpoint_sync_url) = &options.checkpoint_sync_url { + state = fetch_initial_state(checkpoint_sync_url, state) + .await + .inspect_err(|err| error!(%err, "Failed to initialize state")) + .unwrap_or_else(|_| std::process::exit(1)); + } + let store = Store::from_anchor_state(backend, state); let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); @@ -310,34 +310,23 @@ fn read_hex_file_bytes(path: impl AsRef) -> Vec { /// 
`Ok(Store)` on success, or `Err(CheckpointSyncError)` if checkpoint sync fails. /// Genesis path is infallible and always returns `Ok`. async fn fetch_initial_state( - checkpoint_url: Option<&str>, - genesis: &Genesis, - validators: Vec, - backend: Arc, -) -> Result { - let store = if let Some(checkpoint_url) = checkpoint_url { - // Checkpoint sync path - info!(%checkpoint_url, "Starting checkpoint sync"); - - let state = checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await?; - - // Verify against local genesis config - checkpoint_sync::verify_checkpoint_state(&state, genesis.config.genesis_time, &validators)?; - - let anchor_block = checkpoint_sync::construct_anchor_block(&state); - - info!( - slot = state.slot, - validators = state.validators.len(), - finalized_slot = state.latest_finalized.slot, - "Checkpoint sync complete" - ); - - Store::get_forkchoice_store(backend, state, anchor_block) - } else { - let genesis_state = State::from_genesis(genesis, validators); - Store::from_anchor_state(backend, genesis_state) - }; + checkpoint_url: &str, + genesis_state: State, +) -> Result { + // Checkpoint sync path + info!(%checkpoint_url, "Starting checkpoint sync"); + + let state = checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await?; + + // Verify against local genesis config + checkpoint_sync::verify_checkpoint_state(&state, &genesis_state)?; + + info!( + slot = state.slot, + validators = state.validators.len(), + finalized_slot = state.latest_finalized.slot, + "Checkpoint sync complete" + ); - Ok(store) + Ok(state) }
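A note on the empty-body optimization from patch 19: it relies on a round-trip property, namely that a header whose `body_root` equals the tree hash of `BlockBody::default()` fully determines its (empty) body, so that body never needs its own row in the `BlockBodies` table. The sketch below restates the write/read symmetry outside the `Store`; it is illustrative only, and the import path for the tree-hash trait is an assumption.

use ethlambda_types::block::{Block, BlockBody, BlockHeader};
use ethlambda_types::primitives::ssz::TreeHash; // trait path assumed

// Write side (mirrors insert_signed_block): only non-empty bodies get a row.
fn body_to_persist(block: &Block) -> Option<&BlockBody> {
    let empty_body_root = BlockBody::default().tree_hash_root();
    (block.header().body_root != empty_body_root).then_some(&block.body)
}

// Read side (mirrors get_signed_block): a missing row means the body was empty.
fn rebuild(header: BlockHeader, persisted: Option<BlockBody>) -> Block {
    let body = persisted.unwrap_or_default();
    Block::from_header_and_body(header, body)
}

In debug builds, the `debug_assert_eq!` inside `from_header_and_body` then cross-checks `header.body_root` against the reconstructed body, as documented in patch 19.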
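Taken together, the startup path after patch 20 reduces to: build the genesis state, optionally replace it with a fetched-and-verified checkpoint state, and initialize the store from that state alone. The sketch below condenses the `main.rs` flow for reference; it assumes the `checkpoint_sync` module is in scope as in the binary crate, and it elides logging and the surrounding CLI plumbing.

use std::sync::Arc;

use ethlambda_storage::{Store, backend::RocksDBBackend};
use ethlambda_types::state::State;

async fn build_store(
    genesis_state: State,
    checkpoint_sync_url: Option<&str>,
) -> Result<(), checkpoint_sync::CheckpointSyncError> {
    // The genesis state doubles as the reference the checkpoint state is
    // verified against (genesis_time and validator set must match).
    let mut state = genesis_state;

    if let Some(url) = checkpoint_sync_url {
        let checkpoint = checkpoint_sync::fetch_checkpoint_state(url).await?;
        checkpoint_sync::verify_checkpoint_state(&checkpoint, &state)?;
        state = checkpoint;
    }

    // Genesis or checkpoint, the store is always initialized from a state
    // alone; the anchor header comes from `state.latest_block_header`.
    let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB"));
    let _store = Store::from_anchor_state(backend, state);
    Ok(())
}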