diff --git a/CLAUDE.md b/CLAUDE.md index 98285e1..08348d6 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -313,3 +313,6 @@ cargo test -p ethlambda-blockchain --features skip-signature-verification --test - zeam (Zig): - ream (Rust): - qlean (C++): +- grandine (Rust): +- gean (Go): +- Lantern (C): diff --git a/Cargo.lock b/Cargo.lock index 88a3df4..285c139 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1946,11 +1946,13 @@ dependencies = [ "ethlambda-storage", "ethlambda-types", "hex", + "reqwest", "serde", "serde_json", "serde_yaml_ng", "spawned-concurrency", "spawned-rt", + "thiserror 2.0.17", "tokio", "tracing", "tracing-subscriber", @@ -2956,12 +2958,30 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.5", +] + [[package]] name = "hyper-util" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ + "base64", "bytes", "futures-channel", "futures-core", @@ -2969,7 +2989,9 @@ dependencies = [ "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2 0.6.1", "tokio", @@ -3225,6 +3247,16 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.2" @@ -5972,6 +6004,44 @@ 
dependencies = [ "bytecheck", ] +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 1.0.5", +] + [[package]] name = "resolv-conf" version = "0.7.6" @@ -6811,6 +6881,9 @@ name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -7046,6 +7119,16 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.18" @@ -7118,6 +7201,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index e925252..5ad3086 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-69,3 +69,4 @@ vergen-git2 = { version = "9", features = ["rustc"] } rand = "0.9" rocksdb = "0.24" +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } diff --git a/bin/ethlambda/Cargo.toml b/bin/ethlambda/Cargo.toml index 16eb517..f05a1a2 100644 --- a/bin/ethlambda/Cargo.toml +++ b/bin/ethlambda/Cargo.toml @@ -25,6 +25,8 @@ serde_yaml_ng = "0.10" hex.workspace = true clap.workspace = true +reqwest.workspace = true +thiserror.workspace = true [build-dependencies] vergen-git2.workspace = true diff --git a/bin/ethlambda/src/checkpoint_sync.rs b/bin/ethlambda/src/checkpoint_sync.rs new file mode 100644 index 0000000..20b3a7f --- /dev/null +++ b/bin/ethlambda/src/checkpoint_sync.rs @@ -0,0 +1,430 @@ +use std::time::Duration; + +use ethlambda_types::primitives::ssz::{Decode, DecodeError, TreeHash}; +use ethlambda_types::state::State; +use reqwest::Client; + +/// Timeout for establishing the HTTP connection to the checkpoint peer. +/// Fail fast if the peer is unreachable. +const CHECKPOINT_CONNECT_TIMEOUT: Duration = Duration::from_secs(15); + +/// Timeout for reading data during body download. +/// This is an inactivity timeout - it resets on each successful read. 
+const CHECKPOINT_READ_TIMEOUT: Duration = Duration::from_secs(15); + +#[derive(Debug, thiserror::Error)] +pub enum CheckpointSyncError { + #[error("HTTP request failed: {0}")] + Http(#[from] reqwest::Error), + #[error("SSZ deserialization failed: {0:?}")] + SszDecode(DecodeError), + #[error("checkpoint state slot cannot be 0")] + SlotIsZero, + #[error("checkpoint state has no validators")] + NoValidators, + #[error("genesis time mismatch: expected {expected}, got {got}")] + GenesisTimeMismatch { expected: u64, got: u64 }, + #[error("validator count mismatch: expected {expected}, got {got}")] + ValidatorCountMismatch { expected: usize, got: usize }, + #[error( + "validator at position {position} has non-sequential index (expected {expected}, got {got})" + )] + NonSequentialValidatorIndex { + position: usize, + expected: u64, + got: u64, + }, + #[error("validator {index} pubkey mismatch")] + ValidatorPubkeyMismatch { index: usize }, + #[error("finalized slot cannot exceed state slot")] + FinalizedExceedsStateSlot, + #[error("justified slot cannot precede finalized slot")] + JustifiedPrecedesFinalized, + #[error("justified and finalized at same slot must have matching roots")] + JustifiedFinalizedRootMismatch, + #[error("block header slot exceeds state slot")] + BlockHeaderSlotExceedsState, + #[error("block header at finalized slot must match finalized root")] + BlockHeaderFinalizedRootMismatch, + #[error("block header at justified slot must match justified root")] + BlockHeaderJustifiedRootMismatch, +} + +/// Fetch finalized state from checkpoint sync URL. +/// +/// Uses two-phase timeout strategy: +/// - Connect timeout (15s): Fails quickly if peer is unreachable +/// - Read timeout (15s): Inactivity timeout that resets on each read +/// +/// Note: We use a read timeout (via `.read_timeout()`) instead of a total download +/// timeout to automatically detect stalled downloads. 
This allows large states +/// to be downloaded successfully as long as data keeps flowing, while still +/// failing fast if the connection stalls. A plain total timeout would +/// disconnect even for valid downloads if the state is simply too large to +/// transfer within the time limit. +pub async fn fetch_checkpoint_state(base_url: &str) -> Result<State, CheckpointSyncError> { +    let base_url = base_url.trim_end_matches('/'); +    let url = format!("{base_url}/lean/v0/states/finalized"); +    // Use .read_timeout() to detect stalled downloads (inactivity timer). +    // This allows large states to complete as long as data keeps flowing. +    let client = Client::builder() +        .connect_timeout(CHECKPOINT_CONNECT_TIMEOUT) +        .read_timeout(CHECKPOINT_READ_TIMEOUT) +        .build()?; + +    let response = client +        .get(&url) +        .header("Accept", "application/octet-stream") +        .send() +        .await? +        .error_for_status()?; + +    let bytes = response.bytes().await?; +    let state = State::from_ssz_bytes(&bytes).map_err(CheckpointSyncError::SszDecode)?; +    Ok(state) +} + +/// Verify checkpoint state is structurally valid. 
+/// +/// Arguments: +/// - state: The downloaded checkpoint state +/// - genesis_state: Local genesis state used as reference for genesis time and validators +pub fn verify_checkpoint_state( + state: &State, + genesis_state: &State, +) -> Result<(), CheckpointSyncError> { + let expected_genesis_time: u64 = genesis_state.config.genesis_time; + let expected_validators = &genesis_state.validators; + + // Slot sanity check + if state.slot == 0 { + return Err(CheckpointSyncError::SlotIsZero); + } + + // Validators exist + if state.validators.is_empty() { + return Err(CheckpointSyncError::NoValidators); + } + + // Genesis time matches + if state.config.genesis_time != expected_genesis_time { + return Err(CheckpointSyncError::GenesisTimeMismatch { + expected: expected_genesis_time, + got: state.config.genesis_time, + }); + } + + // Validator count matches + if state.validators.len() != expected_validators.len() { + return Err(CheckpointSyncError::ValidatorCountMismatch { + expected: expected_validators.len(), + got: state.validators.len(), + }); + } + + // Validator indices are sequential (0, 1, 2, ...) 
+ for (position, validator) in state.validators.iter().enumerate() { + if validator.index != position as u64 { + return Err(CheckpointSyncError::NonSequentialValidatorIndex { + position, + expected: position as u64, + got: validator.index, + }); + } + } + + // Validator pubkeys match (critical security check) + for (i, (state_val, expected_val)) in state + .validators + .iter() + .zip(expected_validators.iter()) + .enumerate() + { + if state_val.pubkey != expected_val.pubkey { + return Err(CheckpointSyncError::ValidatorPubkeyMismatch { index: i }); + } + } + + // Finalized slot sanity + if state.latest_finalized.slot > state.slot { + return Err(CheckpointSyncError::FinalizedExceedsStateSlot); + } + + // Justified must be at or after finalized + if state.latest_justified.slot < state.latest_finalized.slot { + return Err(CheckpointSyncError::JustifiedPrecedesFinalized); + } + + // If justified and finalized are at same slot, roots must match + if state.latest_justified.slot == state.latest_finalized.slot + && state.latest_justified.root != state.latest_finalized.root + { + return Err(CheckpointSyncError::JustifiedFinalizedRootMismatch); + } + + // Block header slot consistency + if state.latest_block_header.slot > state.slot { + return Err(CheckpointSyncError::BlockHeaderSlotExceedsState); + } + + // If block header matches checkpoint slots, roots must match + let block_root = state.latest_block_header.tree_hash_root(); + + if state.latest_block_header.slot == state.latest_finalized.slot + && block_root != state.latest_finalized.root.0 + { + return Err(CheckpointSyncError::BlockHeaderFinalizedRootMismatch); + } + + if state.latest_block_header.slot == state.latest_justified.slot + && block_root != state.latest_justified.root.0 + { + return Err(CheckpointSyncError::BlockHeaderJustifiedRootMismatch); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use ethlambda_types::block::BlockHeader; + use ethlambda_types::primitives::VariableList; + use 
ethlambda_types::state::{ChainConfig, Checkpoint, Validator}; + + // Helper to create valid test state + fn create_test_state(slot: u64, validators: Vec, genesis_time: u64) -> State { + use ethlambda_types::primitives::H256; + use ethlambda_types::state::{JustificationValidators, JustifiedSlots}; + + State { + slot, + validators: VariableList::new(validators).unwrap(), + latest_block_header: BlockHeader { + slot, + parent_root: H256::ZERO, + state_root: H256::ZERO, + body_root: H256::ZERO, + proposer_index: 0, + }, + latest_justified: Checkpoint { + slot: slot.saturating_sub(10), + root: H256::ZERO, + }, + latest_finalized: Checkpoint { + slot: slot.saturating_sub(20), + root: H256::ZERO, + }, + config: ChainConfig { genesis_time }, + historical_block_hashes: Default::default(), + justified_slots: JustifiedSlots::with_capacity(0).unwrap(), + justifications_roots: Default::default(), + justifications_validators: JustificationValidators::with_capacity(0).unwrap(), + } + } + + /// Create a genesis state to use as reference for verification. 
+ fn create_genesis_state(validators: Vec, genesis_time: u64) -> State { + create_test_state(0, validators, genesis_time) + } + + fn create_test_validator() -> Validator { + Validator { + pubkey: [1u8; 52], + index: 0, + } + } + + fn create_different_validator() -> Validator { + Validator { + pubkey: [2u8; 52], + index: 0, + } + } + + fn create_validators_with_indices(count: usize) -> Vec { + (0..count) + .map(|i| Validator { + pubkey: [i as u8 + 1; 52], + index: i as u64, + }) + .collect() + } + + #[test] + fn verify_accepts_valid_state() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); + } + + #[test] + fn verify_rejects_slot_zero() { + let validators = vec![create_test_validator()]; + let state = create_test_state(0, validators.clone(), 1000); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_empty_validators() { + let state = create_test_state(100, vec![], 1000); + let genesis = create_genesis_state(vec![], 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_genesis_time_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators.clone(), 1000); + // State has genesis_time=1000, genesis has 9999 + let genesis = create_genesis_state(validators, 9999); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_validator_count_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(2), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn 
verify_accepts_multiple_validators_with_sequential_indices() { + let validators = create_validators_with_indices(3); + let state = create_test_state(100, validators.clone(), 1000); + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); + } + + #[test] + fn verify_rejects_non_sequential_validator_indices() { + let mut validators = create_validators_with_indices(3); + validators[1].index = 5; // Wrong index at position 1 + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(3), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_duplicate_validator_indices() { + let mut validators = create_validators_with_indices(3); + validators[2].index = 0; // Duplicate index + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(create_validators_with_indices(3), 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_validator_pubkey_mismatch() { + let validators = vec![create_test_validator()]; + let state = create_test_state(100, validators, 1000); + let genesis = create_genesis_state(vec![create_different_validator()], 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_finalized_after_state_slot() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 101; // Finalized after state slot + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_justified_before_finalized() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 50; + state.latest_justified.slot = 40; // 
Justified before finalized + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_accepts_justified_equals_finalized_with_matching_roots() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + let common_root = H256::from([42u8; 32]); + state.latest_finalized.slot = 50; + state.latest_finalized.root = common_root; + state.latest_justified.slot = 50; // Same slot + state.latest_justified.root = common_root; // Same root + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); + } + + #[test] + fn verify_rejects_justified_equals_finalized_with_different_roots() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_finalized.slot = 50; + state.latest_finalized.root = H256::from([1u8; 32]); + state.latest_justified.slot = 50; // Same slot + state.latest_justified.root = H256::from([2u8; 32]); // Different root - conflict! 
+ let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_rejects_block_header_slot_exceeds_state() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 101; // Block header slot exceeds state slot + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_accepts_block_header_matches_finalized_with_correct_root() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 50; + let block_root = state.latest_block_header.tree_hash_root(); + state.latest_finalized.slot = 50; + state.latest_finalized.root = block_root; + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_ok()); + } + + #[test] + fn verify_rejects_block_header_matches_finalized_with_wrong_root() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 50; + state.latest_finalized.slot = 50; + state.latest_finalized.root = H256::from([99u8; 32]); // Wrong root + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } + + #[test] + fn verify_accepts_block_header_matches_justified_with_correct_root() { + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 90; + let block_root = state.latest_block_header.tree_hash_root(); + state.latest_justified.slot = 90; + state.latest_justified.root = block_root; + let genesis = create_genesis_state(validators, 1000); + 
assert!(verify_checkpoint_state(&state, &genesis).is_ok()); + } + + #[test] + fn verify_rejects_block_header_matches_justified_with_wrong_root() { + use ethlambda_types::primitives::H256; + let validators = vec![create_test_validator()]; + let mut state = create_test_state(100, validators.clone(), 1000); + state.latest_block_header.slot = 90; + state.latest_justified.slot = 90; + state.latest_justified.root = H256::from([99u8; 32]); // Wrong root + let genesis = create_genesis_state(validators, 1000); + assert!(verify_checkpoint_state(&state, &genesis).is_err()); + } +} diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index aa438dc..90acbcc 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -1,3 +1,4 @@ +mod checkpoint_sync; mod version; use std::{ @@ -46,6 +47,10 @@ struct CliOptions { /// The node ID to look up in annotated_validators.yaml (e.g., "ethlambda_0") #[arg(long)] node_id: String, + /// URL of a peer to download checkpoint state from (e.g., http://peer:5052) + /// When set, skips genesis initialization and syncs from checkpoint. 
+    #[arg(long)] +    checkpoint_sync_url: Option<String>, } #[tokio::main] @@ -93,9 +98,17 @@ async fn main() { let validator_keys = read_validator_keys(&validators_path, &validator_keys_dir, &options.node_id); - let genesis_state = State::from_genesis(&genesis, validators); let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = Store::from_genesis(backend, genesis_state); + + let mut state = State::from_genesis(&genesis, validators); + + if let Some(checkpoint_sync_url) = &options.checkpoint_sync_url { + state = fetch_initial_state(checkpoint_sync_url, state) + .await + .inspect_err(|err| error!(%err, "Failed to initialize state")) + .unwrap_or_else(|_| std::process::exit(1)); + } + let store = Store::from_anchor_state(backend, state); let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); @@ -278,3 +291,42 @@ fn read_hex_file_bytes(path: impl AsRef<Path>) -> Vec<u8> { }; bytes } + +/// Fetch the initial state for the node via checkpoint sync. +/// +/// Downloads the finalized state from the peer at `checkpoint_url`, then +/// verifies it against the locally constructed genesis state before +/// returning it for use as the node's anchor state. +/// +/// # Arguments +/// +/// * `checkpoint_url` - URL of the peer to download the checkpoint state from +/// * `genesis_state` - Locally built genesis state, used as the reference +///   (genesis time and validator set) when verifying the downloaded state +/// +/// # Returns +/// +/// `Ok(State)` containing the verified checkpoint state on success, or +/// `Err(CheckpointSyncError)` if the download fails, the response cannot +/// be decoded as SSZ, or verification against the genesis state fails. 
+async fn fetch_initial_state( +    checkpoint_url: &str, +    genesis_state: State, +) -> Result<State, CheckpointSyncError> { +    // Checkpoint sync path +    info!(%checkpoint_url, "Starting checkpoint sync"); + +    let state = checkpoint_sync::fetch_checkpoint_state(checkpoint_url).await?; + +    // Verify against local genesis config +    checkpoint_sync::verify_checkpoint_state(&state, &genesis_state)?; + +    info!( +        slot = state.slot, +        validators = state.validators.len(), +        finalized_slot = state.latest_finalized.slot, +        "Checkpoint sync complete" +    ); + +    Ok(state) +} diff --git a/crates/blockchain/src/lib.rs b/crates/blockchain/src/lib.rs index 8f58b39..510de5a 100644 --- a/crates/blockchain/src/lib.rs +++ b/crates/blockchain/src/lib.rs @@ -279,8 +279,8 @@ impl BlockChainServer { let parent_root = signed_block.message.block.parent_root; let proposer = signed_block.message.block.proposer_index; - // Check if parent block exists before attempting to process - if !self.store.contains_block(&parent_root) { + // Check if parent state exists before attempting to process + if !self.store.has_state(&parent_root) { info!(%slot, %parent_root, %block_root, "Block parent missing, storing as pending"); // Store block for later processing diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index c41af16..4cc787e 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -46,8 +46,14 @@ fn update_head(store: &mut Store) { store.update_checkpoints(ForkCheckpoints::head_only(new_head)); if old_head != new_head { - let old_slot = store.get_block(&old_head).map(|b| b.slot).unwrap_or(0); - let new_slot = store.get_block(&new_head).map(|b| b.slot).unwrap_or(0); + let old_slot = store + .get_block_header(&old_head) + .map(|h| h.slot) + .unwrap_or(0); + let new_slot = store + .get_block_header(&new_head) + .map(|h| h.slot) + .unwrap_or(0); let justified_slot = store.latest_justified().slot; let finalized_slot = store.latest_finalized().slot; info!( @@ -91,16 +97,16 @@ fn 
validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), let data = &attestation.data; // Availability Check - We cannot count a vote if we haven't seen the blocks involved. - let source_block = store - .get_block(&data.source.root) + let source_header = store + .get_block_header(&data.source.root) .ok_or(StoreError::UnknownSourceBlock(data.source.root))?; - let target_block = store - .get_block(&data.target.root) + let target_header = store + .get_block_header(&data.target.root) .ok_or(StoreError::UnknownTargetBlock(data.target.root))?; - if !store.contains_block(&data.head.root) { - return Err(StoreError::UnknownHeadBlock(data.head.root)); - } + let _ = store + .get_block_header(&data.head.root) + .ok_or(StoreError::UnknownHeadBlock(data.head.root))?; // Topology Check - Source must be older than Target. if data.source.slot > data.target.slot { @@ -108,16 +114,16 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), } // Consistency Check - Validate checkpoint slots match block slots. 
- if source_block.slot != data.source.slot { + if source_header.slot != data.source.slot { return Err(StoreError::SourceSlotMismatch { checkpoint_slot: data.source.slot, - block_slot: source_block.slot, + block_slot: source_header.slot, }); } - if target_block.slot != data.target.slot { + if target_header.slot != data.target.slot { return Err(StoreError::TargetSlotMismatch { checkpoint_slot: data.target.slot, - block_slot: target_block.slot, + block_slot: target_header.slot, }); } @@ -325,7 +331,7 @@ pub fn on_block( let slot = block.slot; // Skip duplicate blocks (idempotent operation) - if store.contains_block(&block_root) { + if store.has_state(&block_root) { return Ok(()); } @@ -441,12 +447,12 @@ pub fn on_block( pub fn get_attestation_target(store: &Store) -> Checkpoint { // Start from current head let mut target_block_root = store.head(); - let mut target_block = store - .get_block(&target_block_root) + let mut target_header = store + .get_block_header(&target_block_root) .expect("head block exists"); let safe_target_block_slot = store - .get_block(&store.safe_target()) + .get_block_header(&store.safe_target()) .expect("safe target exists") .slot; @@ -455,10 +461,10 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // This ensures the target doesn't advance too far ahead of safe target, // providing a balance between liveness and safety. for _ in 0..JUSTIFICATION_LOOKBACK_SLOTS { - if target_block.slot > safe_target_block_slot { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + if target_header.slot > safe_target_block_slot { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } else { break; @@ -469,15 +475,15 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // // Walk back until we find a slot that satisfies justifiability rules // relative to the latest finalized checkpoint. 
- while !slot_is_justifiable_after(target_block.slot, store.latest_finalized().slot) { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + while !slot_is_justifiable_after(target_header.slot, store.latest_finalized().slot) { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } Checkpoint { root: target_block_root, - slot: target_block.slot, + slot: target_header.slot, } } @@ -487,7 +493,7 @@ pub fn produce_attestation_data(store: &Store, slot: u64) -> AttestationData { let head_checkpoint = Checkpoint { root: store.head(), slot: store - .get_block(&store.head()) + .get_block_header(&store.head()) .expect("head block exists") .slot, }; @@ -1055,16 +1061,16 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { return false; } - let Some(old_head_block) = store.get_block(&old_head) else { + let Some(old_head_header) = store.get_block_header(&old_head) else { return false; }; - let Some(new_head_block) = store.get_block(&new_head) else { + let Some(new_head_header) = store.get_block_header(&new_head) else { return false; }; - let old_slot = old_head_block.slot; - let new_slot = new_head_block.slot; + let old_slot = old_head_header.slot; + let new_slot = new_head_header.slot; // Determine which head has the higher slot and walk back from it let (mut current_root, target_slot, target_root) = if new_slot >= old_slot { @@ -1074,12 +1080,12 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { }; // Walk back through the chain until we reach the target slot - while let Some(current_block) = store.get_block(¤t_root) { - if current_block.slot <= target_slot { + while let Some(current_header) = store.get_block_header(¤t_root) { + if current_header.slot <= target_slot { // We've reached the target slot - check if we're at the target block return current_root != target_root; } - current_root = 
current_block.parent_root; + current_root = current_header.parent_root; } // Couldn't walk back far enough (missing blocks in chain) diff --git a/crates/blockchain/tests/forkchoice_spectests.rs b/crates/blockchain/tests/forkchoice_spectests.rs index b35385c..e1c69e1 100644 --- a/crates/blockchain/tests/forkchoice_spectests.rs +++ b/crates/blockchain/tests/forkchoice_spectests.rs @@ -187,13 +187,13 @@ fn validate_checks( // Validate headSlot if let Some(expected_slot) = checks.head_slot { let head_root = st.head(); - let head_block = st - .get_block(&head_root) + let head_header = st + .get_block_header(&head_root) .ok_or_else(|| format!("Step {}: head block not found", step_idx))?; - if head_block.slot != expected_slot { + if head_header.slot != expected_slot { return Err(format!( "Step {}: headSlot mismatch: expected {}, got {}", - step_idx, expected_slot, head_block.slot + step_idx, expected_slot, head_header.slot ) .into()); } diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index d54e050..776dbb1 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -176,7 +176,7 @@ impl BlockSignaturesWithAttestation { /// /// Headers are smaller than full blocks. They're useful for tracking the chain /// without storing everything. -#[derive(Debug, Clone, Serialize, Encode, Decode, TreeHash)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Encode, Decode, TreeHash)] pub struct BlockHeader { /// The slot in which the block was proposed pub slot: u64, @@ -205,6 +205,38 @@ pub struct Block { pub body: BlockBody, } +impl Block { + /// Extract the block header, computing the body root. + pub fn header(&self) -> BlockHeader { + BlockHeader { + slot: self.slot, + proposer_index: self.proposer_index, + parent_root: self.parent_root, + state_root: self.state_root, + body_root: self.body.tree_hash_root(), + } + } + + /// Reconstruct a block from header and body. 
+ /// + /// The caller should ensure that `header.body_root` matches `body.tree_hash_root()`. + /// This is verified with a debug assertion but not in release builds. + pub fn from_header_and_body(header: BlockHeader, body: BlockBody) -> Self { + debug_assert_eq!( + header.body_root, + body.tree_hash_root(), + "body root mismatch" + ); + Self { + slot: header.slot, + proposer_index: header.proposer_index, + parent_root: header.parent_root, + state_root: header.state_root, + body, + } + } +} + /// The body of a block, containing payload data. /// /// Currently, the main operation is voting. Validators submit attestations which are diff --git a/crates/net/p2p/src/req_resp/handlers.rs b/crates/net/p2p/src/req_resp/handlers.rs index 962a9fe..a11e06d 100644 --- a/crates/net/p2p/src/req_resp/handlers.rs +++ b/crates/net/p2p/src/req_resp/handlers.rs @@ -169,7 +169,10 @@ async fn handle_blocks_by_root_response( pub fn build_status(store: &Store) -> Status { let finalized = store.latest_finalized(); let head_root = store.head(); - let head_slot = store.get_block(&head_root).expect("head block exists").slot; + let head_slot = store + .get_block_header(&head_root) + .expect("head block exists") + .slot; Status { finalized, head: ethlambda_types::state::Checkpoint { diff --git a/crates/net/rpc/src/lib.rs b/crates/net/rpc/src/lib.rs index fb5100c..e1f2b9e 100644 --- a/crates/net/rpc/src/lib.rs +++ b/crates/net/rpc/src/lib.rs @@ -118,7 +118,7 @@ mod tests { async fn test_get_latest_justified_checkpoint() { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = Store::from_anchor_state(backend, state); let app = build_api_router(store.clone()); @@ -154,7 +154,7 @@ mod tests { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = Store::from_anchor_state(backend, state); // Get the expected state from 
the store let finalized = store.latest_finalized(); diff --git a/crates/storage/src/api/tables.rs b/crates/storage/src/api/tables.rs index 36ed953..deecdad 100644 --- a/crates/storage/src/api/tables.rs +++ b/crates/storage/src/api/tables.rs @@ -1,8 +1,10 @@ /// Tables in the storage layer. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Table { - /// Block storage: H256 -> Block - Blocks, + /// Block header storage: H256 -> BlockHeader + BlockHeaders, + /// Block body storage: H256 -> BlockBody + BlockBodies, /// Block signatures storage: H256 -> BlockSignaturesWithAttestation /// /// Stored separately from blocks because the genesis block has no signatures. @@ -29,8 +31,9 @@ pub enum Table { } /// All table variants. -pub const ALL_TABLES: [Table; 9] = [ - Table::Blocks, +pub const ALL_TABLES: [Table; 10] = [ + Table::BlockHeaders, + Table::BlockBodies, Table::BlockSignatures, Table::States, Table::LatestKnownAttestations, diff --git a/crates/storage/src/backend/rocksdb.rs b/crates/storage/src/backend/rocksdb.rs index b42053d..c25b128 100644 --- a/crates/storage/src/backend/rocksdb.rs +++ b/crates/storage/src/backend/rocksdb.rs @@ -12,7 +12,8 @@ use std::sync::Arc; /// Returns the column family name for a table. 
fn cf_name(table: Table) -> &'static str { match table { - Table::Blocks => "blocks", + Table::BlockHeaders => "block_headers", + Table::BlockBodies => "block_bodies", Table::BlockSignatures => "block_signatures", Table::States => "states", Table::LatestKnownAttestations => "latest_known_attestations", @@ -166,7 +167,10 @@ mod tests { let backend = RocksDBBackend::open(dir.path()).unwrap(); let mut batch = backend.begin_write().unwrap(); batch - .put_batch(Table::Blocks, vec![(b"key1".to_vec(), b"value1".to_vec())]) + .put_batch( + Table::BlockHeaders, + vec![(b"key1".to_vec(), b"value1".to_vec())], + ) .unwrap(); batch.commit().unwrap(); } @@ -175,7 +179,7 @@ mod tests { { let backend = RocksDBBackend::open(dir.path()).unwrap(); let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"key1").unwrap(); + let value = view.get(Table::BlockHeaders, b"key1").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } diff --git a/crates/storage/src/backend/tests.rs b/crates/storage/src/backend/tests.rs index 7401577..f1a96b0 100644 --- a/crates/storage/src/backend/tests.rs +++ b/crates/storage/src/backend/tests.rs @@ -28,7 +28,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_get_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -38,7 +38,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { // Read data { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_put_get_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_put_get_key").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } @@ -49,7 +49,7 @@ fn test_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_delete_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -60,7 +60,7 @@ fn 
test_delete(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_delete_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_delete_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } @@ -68,7 +68,7 @@ fn test_delete(backend: &dyn StorageBackend) { // Verify deleted { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_delete_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_delete_key").unwrap(); assert_eq!(value, None); } } @@ -109,7 +109,7 @@ fn test_prefix_iterator(backend: &dyn StorageBackend) { fn test_nonexistent_key(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); let value = view - .get(Table::Blocks, b"test_nonexistent_key_12345") + .get(Table::BlockHeaders, b"test_nonexistent_key_12345") .unwrap(); assert_eq!(value, None); } @@ -120,7 +120,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"old".to_vec())], ) .unwrap(); @@ -131,11 +131,11 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_del_put_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_del_put_key".to_vec()]) .unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"new".to_vec())], ) .unwrap(); @@ -144,7 +144,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_del_put_key").unwrap(), + view.get(Table::BlockHeaders, b"test_del_put_key").unwrap(), Some(b"new".to_vec()) ); } @@ -155,18 +155,21 @@ fn test_put_then_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - 
Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_del_key".to_vec(), b"value".to_vec())], ) .unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_put_del_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_put_del_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } let view = backend.begin_read().unwrap(); - assert_eq!(view.get(Table::Blocks, b"test_put_del_key").unwrap(), None); + assert_eq!( + view.get(Table::BlockHeaders, b"test_put_del_key").unwrap(), + None + ); } fn test_multiple_tables(backend: &dyn StorageBackend) { @@ -175,7 +178,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_multi_key".to_vec(), b"block".to_vec())], ) .unwrap(); @@ -192,7 +195,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_multi_key").unwrap(), + view.get(Table::BlockHeaders, b"test_multi_key").unwrap(), Some(b"block".to_vec()) ); assert_eq!( diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index ed55223..b6ef9ca 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -1,5 +1,11 @@ use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; + +/// The tree hash root of an empty block body. +/// +/// Used to detect genesis/anchor blocks that have no attestations, +/// allowing us to skip storing empty bodies and reconstruct them on read. 
+static EMPTY_BODY_ROOT: LazyLock<H256> = LazyLock::new(|| BlockBody::default().tree_hash_root()); use crate::api::{StorageBackend, Table}; use crate::types::{StoredAggregatedPayload, StoredSignature}; @@ -7,7 +13,7 @@ use crate::types::{StoredAggregatedPayload, StoredSignature}; use ethlambda_types::{ attestation::AttestationData, block::{ - AggregatedSignatureProof, Block, BlockBody, BlockSignaturesWithAttestation, + AggregatedSignatureProof, Block, BlockBody, BlockHeader, BlockSignaturesWithAttestation, BlockWithAttestation, SignedBlockWithAttestation, }, primitives::{ @@ -105,46 +111,94 @@ fn decode_live_chain_key(bytes: &[u8]) -> (u64, H256) { (slot, root) } -/// Underlying storage of the node. -/// Similar to the spec's `Store`, but backed by a pluggable storage backend. +/// Fork choice store backed by a pluggable storage backend. +/// +/// The Store maintains all state required for fork choice and block processing: +/// +/// - **Metadata**: time, config, head, safe_target, justified/finalized checkpoints +/// - **Blocks**: headers and bodies stored separately for efficient header-only queries +/// - **States**: beacon states indexed by block root +/// - **Attestations**: latest known and pending ("new") attestations per validator +/// - **Signatures**: gossip signatures and aggregated proofs for signature verification +/// - **LiveChain**: slot index for efficient fork choice traversal (pruned on finalization) +/// +/// # Constructors /// -/// All data is stored in the backend. Metadata fields (time, config, head, etc.) -/// are stored in the Metadata table with their field name as the key. +/// - [`from_anchor_state`](Self::from_anchor_state): Initialize from a checkpoint state (no block body) +/// - [`get_forkchoice_store`](Self::get_forkchoice_store): Initialize from state + block (stores body) #[derive(Clone)] pub struct Store { - /// Storage backend for all store data. backend: Arc<dyn StorageBackend>, } impl Store { - /// Initialize a Store from a genesis state.
- pub fn from_genesis(backend: Arc<dyn StorageBackend>, mut genesis_state: State) -> Self { - // Ensure the header state root is zero before computing the state root - genesis_state.latest_block_header.state_root = H256::ZERO; - - let genesis_state_root = genesis_state.tree_hash_root(); - let genesis_block = Block { - slot: 0, - proposer_index: 0, - parent_root: H256::ZERO, - state_root: genesis_state_root, - body: BlockBody::default(), - }; - Self::get_forkchoice_store(backend, genesis_state, genesis_block) + /// Initialize a Store from an anchor state only. + /// + /// Uses the state's `latest_block_header` as the anchor block header. + /// No block body is stored since it's not available. + pub fn from_anchor_state(backend: Arc<dyn StorageBackend>, anchor_state: State) -> Self { + Self::init_store(backend, anchor_state, None) } /// Initialize a Store from an anchor state and block. + /// + /// The block must match the state's `latest_block_header`. + /// Named to mirror the spec's `get_forkchoice_store` function. + /// + /// # Panics + /// + /// Panics if the block's header doesn't match the state's `latest_block_header` + /// (comparing all fields except `state_root`, which is computed internally). pub fn get_forkchoice_store( backend: Arc<dyn StorageBackend>, anchor_state: State, anchor_block: Block, ) -> Self { + // Compare headers with state_root zeroed (init_store handles state_root separately) + let mut state_header = anchor_state.latest_block_header.clone(); + let mut block_header = anchor_block.header(); + state_header.state_root = H256::ZERO; + block_header.state_root = H256::ZERO; + + assert_eq!( + state_header, block_header, + "block header doesn't match state's latest_block_header" + ); + + Self::init_store(backend, anchor_state, Some(anchor_block.body)) + } + + /// Internal helper to initialize the store with anchor data. + /// + /// Header is taken from `anchor_state.latest_block_header`.
+ fn init_store( + backend: Arc<dyn StorageBackend>, + mut anchor_state: State, + anchor_body: Option<BlockBody>, + ) -> Self { + // Save original state_root for validation + let original_state_root = anchor_state.latest_block_header.state_root; + + // Zero out state_root before computing (state contains header, header contains state_root) + anchor_state.latest_block_header.state_root = H256::ZERO; + + // Compute state root with zeroed header let anchor_state_root = anchor_state.tree_hash_root(); - let anchor_block_root = anchor_block.tree_hash_root(); + + // Validate: original must be zero (genesis) or match computed (checkpoint sync) + assert!( + original_state_root == H256::ZERO || original_state_root == anchor_state_root, + "anchor header state_root mismatch: expected {anchor_state_root:?}, got {original_state_root:?}" + ); + + // Populate the correct state_root + anchor_state.latest_block_header.state_root = anchor_state_root; + + let anchor_block_root = anchor_state.latest_block_header.tree_hash_root(); let anchor_checkpoint = Checkpoint { root: anchor_block_root, - slot: anchor_block.slot, + slot: anchor_state.latest_block_header.slot, }; // Insert initial data @@ -170,15 +224,24 @@ impl Store { .put_batch(Table::Metadata, metadata_entries) .expect("put metadata"); - // Block and state - let block_entries = vec![( + // Block header + let header_entries = vec![( anchor_block_root.as_ssz_bytes(), - anchor_block.as_ssz_bytes(), + anchor_state.latest_block_header.as_ssz_bytes(), )]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + // Block body (if provided) + if let Some(body) = anchor_body { + let body_entries = vec![(anchor_block_root.as_ssz_bytes(), body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } + // State let state_entries = vec![( anchor_block_root.as_ssz_bytes(), anchor_state.as_ssz_bytes(), @@ -187,14 +250,14 @@
impl Store { .put_batch(Table::States, state_entries) .expect("put state"); - // Non-finalized chain index + // Live chain index let index_entries = vec![( - encode_live_chain_key(anchor_block.slot, &anchor_block_root), - anchor_block.parent_root.as_ssz_bytes(), + encode_live_chain_key(anchor_state.latest_block_header.slot, &anchor_block_root), + anchor_state.latest_block_header.parent_root.as_ssz_bytes(), )]; batch .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); + .expect("put live chain index"); batch.commit().expect("commit"); } @@ -225,44 +288,50 @@ impl Store { // ============ Time ============ + /// Returns the current store time in seconds since genesis. pub fn time(&self) -> u64 { self.get_metadata(KEY_TIME) } + /// Sets the current store time. pub fn set_time(&mut self, time: u64) { self.set_metadata(KEY_TIME, &time); } // ============ Config ============ + /// Returns the chain configuration. pub fn config(&self) -> ChainConfig { self.get_metadata(KEY_CONFIG) } // ============ Head ============ + /// Returns the current head block root. pub fn head(&self) -> H256 { self.get_metadata(KEY_HEAD) } // ============ Safe Target ============ + /// Returns the safe target block root for attestations. pub fn safe_target(&self) -> H256 { self.get_metadata(KEY_SAFE_TARGET) } + /// Sets the safe target block root. pub fn set_safe_target(&mut self, safe_target: H256) { self.set_metadata(KEY_SAFE_TARGET, &safe_target); } - // ============ Latest Justified ============ + // ============ Checkpoints ============ + /// Returns the latest justified checkpoint. pub fn latest_justified(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_JUSTIFIED) } - // ============ Latest Finalized ============ - + /// Returns the latest finalized checkpoint. 
pub fn latest_finalized(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_FINALIZED) } @@ -454,36 +523,12 @@ impl Store { removed_count } - pub fn get_block(&self, root: &H256) -> Option<Block> { + /// Get the block header by root. + pub fn get_block_header(&self, root: &H256) -> Option<BlockHeader> { let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) + view.get(Table::BlockHeaders, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| Block::from_ssz_bytes(&bytes).expect("valid block")) - } - - pub fn contains_block(&self, root: &H256) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) - .expect("get") - .is_some() - } - - pub fn insert_block(&mut self, root: H256, block: Block) { - let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; - batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); - - let index_entries = vec![( - encode_live_chain_key(block.slot, &root), - block.parent_root.as_ssz_bytes(), - )]; - batch - .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); - - batch.commit().expect("commit"); + .map(|bytes| BlockHeader::from_ssz_bytes(&bytes).expect("valid header")) } // ============ Signed Blocks ============ @@ -513,10 +558,19 @@ impl Store { let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; + let header = block.header(); + let header_entries = vec![(root.as_ssz_bytes(), header.as_ssz_bytes())]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + // Skip storing empty bodies - they can be reconstructed from the header's body_root + if header.body_root != *EMPTY_BODY_ROOT { + let body_entries = vec![(root.as_ssz_bytes(),
block.body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } let sig_entries = vec![(root.as_ssz_bytes(), signatures.as_ssz_bytes())]; batch @@ -534,18 +588,28 @@ impl Store { batch.commit().expect("commit"); } - /// Get a signed block by combining block and signatures. + /// Get a signed block by combining header, body, and signatures. /// - /// Returns None if either the block or signatures are not found. + /// Returns None if any of the components are not found. /// Note: Genesis block has no entry in BlockSignatures table. pub fn get_signed_block(&self, root: &H256) -> Option<SignedBlockWithAttestation> { let view = self.backend.begin_read().expect("read view"); let key = root.as_ssz_bytes(); - let block_bytes = view.get(Table::Blocks, &key).expect("get")?; + let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; let sig_bytes = view.get(Table::BlockSignatures, &key).expect("get")?; - let block = Block::from_ssz_bytes(&block_bytes).expect("valid block"); + let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); + + // Use empty body if header indicates empty, otherwise fetch from DB + let body = if header.body_root == *EMPTY_BODY_ROOT { + BlockBody::default() + } else { + let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; + BlockBody::from_ssz_bytes(&body_bytes).expect("valid body") + }; + + let block = Block::from_header_and_body(header, body); let signatures = BlockSignaturesWithAttestation::from_ssz_bytes(&sig_bytes).expect("valid signatures"); @@ -554,29 +618,23 @@ impl Store { // ============ States ============ - /// Iterate over all (root, state) pairs. - pub fn iter_states(&self) -> impl Iterator<Item = (H256, State)> + '_ { + /// Returns the state for the given block root.
+ pub fn get_state(&self, root: &H256) -> Option<State> { let view = self.backend.begin_read().expect("read view"); - let entries: Vec<_> = view - .prefix_iterator(Table::States, &[]) - .expect("iterator") - .filter_map(|res| res.ok()) - .map(|(k, v)| { - let root = H256::from_ssz_bytes(&k).expect("valid root"); - let state = State::from_ssz_bytes(&v).expect("valid state"); - (root, state) - }) - .collect(); - entries.into_iter() + view.get(Table::States, &root.as_ssz_bytes()) + .expect("get") + .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) } - pub fn get_state(&self, root: &H256) -> Option<State> { + /// Returns whether a state exists for the given block root. + pub fn has_state(&self, root: &H256) -> bool { let view = self.backend.begin_read().expect("read view"); view.get(Table::States, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) + .is_some() } + /// Stores a state indexed by block root. pub fn insert_state(&mut self, root: H256, state: State) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(root.as_ssz_bytes(), state.as_ssz_bytes())]; @@ -584,9 +642,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest Known Attestations ============ + // ============ Known Attestations ============ + // + // "Known" attestations are included in fork choice weight calculations. + // They're promoted from "new" attestations at specific intervals. - /// Iterate over all (validator_id, attestation_data) pairs for known attestations. + /// Iterates over all known attestations (validator_id, attestation_data). pub fn iter_known_attestations(&self) -> impl Iterator<Item = (u64, AttestationData)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -602,6 +663,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest known attestation.
pub fn get_known_attestation(&self, validator_id: &u64) -> Option<AttestationData> { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestKnownAttestations, &validator_id.as_ssz_bytes()) @@ -609,6 +671,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's latest known attestation. pub fn insert_known_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -618,9 +681,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest New Attestations ============ + // ============ New Attestations ============ + // + // "New" attestations are pending attestations not yet included in fork choice. + // They're promoted to "known" via `promote_new_attestations`. - /// Iterate over all (validator_id, attestation_data) pairs for new attestations. + /// Iterates over all new (pending) attestations. pub fn iter_new_attestations(&self) -> impl Iterator<Item = (u64, AttestationData)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -636,6 +702,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest new (pending) attestation. pub fn get_new_attestation(&self, validator_id: &u64) -> Option<AttestationData> { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestNewAttestations, &validator_id.as_ssz_bytes()) @@ -643,6 +710,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's new (pending) attestation.
pub fn insert_new_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -652,6 +720,7 @@ impl Store { batch.commit().expect("commit"); } + /// Removes a validator's new (pending) attestation. pub fn remove_new_attestation(&mut self, validator_id: &u64) { let mut batch = self.backend.begin_write().expect("write batch"); batch @@ -695,8 +764,11 @@ impl Store { } // ============ Gossip Signatures ============ + // + // Gossip signatures are individual validator signatures received via P2P. + // They're aggregated into proofs for block signature verification. - /// Iterate over all (signature_key, stored_signature) pairs. + /// Iterates over all gossip signatures. pub fn iter_gossip_signatures( &self, ) -> impl Iterator<Item = (SignatureKey, StoredSignature)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -715,20 +787,7 @@ impl Store { entries.into_iter() } - pub fn get_gossip_signature(&self, key: &SignatureKey) -> Option<StoredSignature> { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .and_then(|bytes| StoredSignature::from_ssz_bytes(&bytes).ok()) - } - - pub fn contains_gossip_signature(&self, key: &SignatureKey) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .is_some() - } - + /// Stores a gossip signature for later aggregation. pub fn insert_gossip_signature( &mut self, attestation_data: &AttestationData, @@ -749,8 +808,11 @@ impl Store { } // ============ Aggregated Payloads ============ + // + // Aggregated payloads are leanVM proofs combining multiple signatures. + // Used to verify block signatures efficiently. - /// Iterate over all (signature_key, stored_payloads) pairs. + /// Iterates over all aggregated signature payloads.
pub fn iter_aggregated_payloads( &self, ) -> impl Iterator<Item = (SignatureKey, Vec<StoredAggregatedPayload>)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -769,10 +831,8 @@ impl Store { entries.into_iter() } - pub fn get_aggregated_payloads( - &self, - key: &SignatureKey, - ) -> Option<Vec<StoredAggregatedPayload>> { + /// Returns aggregated payloads for a signature key. + fn get_aggregated_payloads(&self, key: &SignatureKey) -> Option<Vec<StoredAggregatedPayload>> { let view = self.backend.begin_read().expect("read view"); view.get(Table::AggregatedPayloads, &encode_signature_key(key)) .expect("get") @@ -821,7 +881,7 @@ impl Store { /// Returns the slot of the current safe target block. pub fn safe_target_slot(&self) -> u64 { - self.get_block(&self.safe_target()) + self.get_block_header(&self.safe_target()) .expect("safe target exists") .slot }