diff --git a/bin/ethlambda/src/main.rs b/bin/ethlambda/src/main.rs index aa438dc..b399a74 100644 --- a/bin/ethlambda/src/main.rs +++ b/bin/ethlambda/src/main.rs @@ -95,7 +95,7 @@ async fn main() { let genesis_state = State::from_genesis(&genesis, validators); let backend = Arc::new(RocksDBBackend::open("./data").expect("Failed to open RocksDB")); - let store = Store::from_genesis(backend, genesis_state); + let store = Store::from_anchor_state(backend, genesis_state); let (p2p_tx, p2p_rx) = tokio::sync::mpsc::unbounded_channel(); let blockchain = BlockChain::spawn(store.clone(), p2p_tx, validator_keys); diff --git a/crates/blockchain/src/lib.rs b/crates/blockchain/src/lib.rs index 8f58b39..510de5a 100644 --- a/crates/blockchain/src/lib.rs +++ b/crates/blockchain/src/lib.rs @@ -279,8 +279,8 @@ impl BlockChainServer { let parent_root = signed_block.message.block.parent_root; let proposer = signed_block.message.block.proposer_index; - // Check if parent block exists before attempting to process - if !self.store.contains_block(&parent_root) { + // Check if parent state exists before attempting to process + if !self.store.has_state(&parent_root) { info!(%slot, %parent_root, %block_root, "Block parent missing, storing as pending"); // Store block for later processing diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index c41af16..4cc787e 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -46,8 +46,14 @@ fn update_head(store: &mut Store) { store.update_checkpoints(ForkCheckpoints::head_only(new_head)); if old_head != new_head { - let old_slot = store.get_block(&old_head).map(|b| b.slot).unwrap_or(0); - let new_slot = store.get_block(&new_head).map(|b| b.slot).unwrap_or(0); + let old_slot = store + .get_block_header(&old_head) + .map(|h| h.slot) + .unwrap_or(0); + let new_slot = store + .get_block_header(&new_head) + .map(|h| h.slot) + .unwrap_or(0); let justified_slot = store.latest_justified().slot; let 
finalized_slot = store.latest_finalized().slot; info!( @@ -91,16 +97,16 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), let data = &attestation.data; // Availability Check - We cannot count a vote if we haven't seen the blocks involved. - let source_block = store - .get_block(&data.source.root) + let source_header = store + .get_block_header(&data.source.root) .ok_or(StoreError::UnknownSourceBlock(data.source.root))?; - let target_block = store - .get_block(&data.target.root) + let target_header = store + .get_block_header(&data.target.root) .ok_or(StoreError::UnknownTargetBlock(data.target.root))?; - if !store.contains_block(&data.head.root) { - return Err(StoreError::UnknownHeadBlock(data.head.root)); - } + let _ = store + .get_block_header(&data.head.root) + .ok_or(StoreError::UnknownHeadBlock(data.head.root))?; // Topology Check - Source must be older than Target. if data.source.slot > data.target.slot { @@ -108,16 +114,16 @@ fn validate_attestation(store: &Store, attestation: &Attestation) -> Result<(), } // Consistency Check - Validate checkpoint slots match block slots. 
- if source_block.slot != data.source.slot { + if source_header.slot != data.source.slot { return Err(StoreError::SourceSlotMismatch { checkpoint_slot: data.source.slot, - block_slot: source_block.slot, + block_slot: source_header.slot, }); } - if target_block.slot != data.target.slot { + if target_header.slot != data.target.slot { return Err(StoreError::TargetSlotMismatch { checkpoint_slot: data.target.slot, - block_slot: target_block.slot, + block_slot: target_header.slot, }); } @@ -325,7 +331,7 @@ pub fn on_block( let slot = block.slot; // Skip duplicate blocks (idempotent operation) - if store.contains_block(&block_root) { + if store.has_state(&block_root) { return Ok(()); } @@ -441,12 +447,12 @@ pub fn on_block( pub fn get_attestation_target(store: &Store) -> Checkpoint { // Start from current head let mut target_block_root = store.head(); - let mut target_block = store - .get_block(&target_block_root) + let mut target_header = store + .get_block_header(&target_block_root) .expect("head block exists"); let safe_target_block_slot = store - .get_block(&store.safe_target()) + .get_block_header(&store.safe_target()) .expect("safe target exists") .slot; @@ -455,10 +461,10 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // This ensures the target doesn't advance too far ahead of safe target, // providing a balance between liveness and safety. for _ in 0..JUSTIFICATION_LOOKBACK_SLOTS { - if target_block.slot > safe_target_block_slot { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + if target_header.slot > safe_target_block_slot { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } else { break; @@ -469,15 +475,15 @@ pub fn get_attestation_target(store: &Store) -> Checkpoint { // // Walk back until we find a slot that satisfies justifiability rules // relative to the latest finalized checkpoint. 
- while !slot_is_justifiable_after(target_block.slot, store.latest_finalized().slot) { - target_block_root = target_block.parent_root; - target_block = store - .get_block(&target_block_root) + while !slot_is_justifiable_after(target_header.slot, store.latest_finalized().slot) { + target_block_root = target_header.parent_root; + target_header = store + .get_block_header(&target_block_root) .expect("parent block exists"); } Checkpoint { root: target_block_root, - slot: target_block.slot, + slot: target_header.slot, } } @@ -487,7 +493,7 @@ pub fn produce_attestation_data(store: &Store, slot: u64) -> AttestationData { let head_checkpoint = Checkpoint { root: store.head(), slot: store - .get_block(&store.head()) + .get_block_header(&store.head()) .expect("head block exists") .slot, }; @@ -1055,16 +1061,16 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { return false; } - let Some(old_head_block) = store.get_block(&old_head) else { return false; }; - let Some(new_head_block) = store.get_block(&new_head) else { + let Some(old_head_header) = store.get_block_header(&old_head) else { return false; }; + let Some(new_head_header) = store.get_block_header(&new_head) else { return false; }; - let old_slot = old_head_block.slot; - let new_slot = new_head_block.slot; + let old_slot = old_head_header.slot; + let new_slot = new_head_header.slot; // Determine which head has the higher slot and walk back from it let (mut current_root, target_slot, target_root) = if new_slot >= old_slot { @@ -1074,12 +1080,12 @@ fn is_reorg(old_head: H256, new_head: H256, store: &Store) -> bool { }; // Walk back through the chain until we reach the target slot - while let Some(current_block) = store.get_block(&current_root) { - if current_block.slot <= target_slot { + while let Some(current_header) = store.get_block_header(&current_root) { + if current_header.slot <= target_slot { // We've reached the target slot - check if we're at the target block return current_root != target_root; } - current_root = 
current_block.parent_root; + current_root = current_header.parent_root; } // Couldn't walk back far enough (missing blocks in chain) diff --git a/crates/blockchain/tests/forkchoice_spectests.rs b/crates/blockchain/tests/forkchoice_spectests.rs index b35385c..e1c69e1 100644 --- a/crates/blockchain/tests/forkchoice_spectests.rs +++ b/crates/blockchain/tests/forkchoice_spectests.rs @@ -187,13 +187,13 @@ fn validate_checks( // Validate headSlot if let Some(expected_slot) = checks.head_slot { let head_root = st.head(); - let head_block = st - .get_block(&head_root) + let head_header = st + .get_block_header(&head_root) .ok_or_else(|| format!("Step {}: head block not found", step_idx))?; - if head_block.slot != expected_slot { + if head_header.slot != expected_slot { return Err(format!( "Step {}: headSlot mismatch: expected {}, got {}", - step_idx, expected_slot, head_block.slot + step_idx, expected_slot, head_header.slot ) .into()); } diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index d54e050..776dbb1 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -176,7 +176,7 @@ impl BlockSignaturesWithAttestation { /// /// Headers are smaller than full blocks. They're useful for tracking the chain /// without storing everything. -#[derive(Debug, Clone, Serialize, Encode, Decode, TreeHash)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Encode, Decode, TreeHash)] pub struct BlockHeader { /// The slot in which the block was proposed pub slot: u64, @@ -205,6 +205,38 @@ pub struct Block { pub body: BlockBody, } +impl Block { + /// Extract the block header, computing the body root. + pub fn header(&self) -> BlockHeader { + BlockHeader { + slot: self.slot, + proposer_index: self.proposer_index, + parent_root: self.parent_root, + state_root: self.state_root, + body_root: self.body.tree_hash_root(), + } + } + + /// Reconstruct a block from header and body. 
+ /// + /// The caller should ensure that `header.body_root` matches `body.tree_hash_root()`. + /// This is verified with a debug assertion but not in release builds. + pub fn from_header_and_body(header: BlockHeader, body: BlockBody) -> Self { + debug_assert_eq!( + header.body_root, + body.tree_hash_root(), + "body root mismatch" + ); + Self { + slot: header.slot, + proposer_index: header.proposer_index, + parent_root: header.parent_root, + state_root: header.state_root, + body, + } + } +} + /// The body of a block, containing payload data. /// /// Currently, the main operation is voting. Validators submit attestations which are diff --git a/crates/net/p2p/src/req_resp/handlers.rs b/crates/net/p2p/src/req_resp/handlers.rs index 962a9fe..a11e06d 100644 --- a/crates/net/p2p/src/req_resp/handlers.rs +++ b/crates/net/p2p/src/req_resp/handlers.rs @@ -169,7 +169,10 @@ async fn handle_blocks_by_root_response( pub fn build_status(store: &Store) -> Status { let finalized = store.latest_finalized(); let head_root = store.head(); - let head_slot = store.get_block(&head_root).expect("head block exists").slot; + let head_slot = store + .get_block_header(&head_root) + .expect("head block exists") + .slot; Status { finalized, head: ethlambda_types::state::Checkpoint { diff --git a/crates/net/rpc/src/lib.rs b/crates/net/rpc/src/lib.rs index fb5100c..e1f2b9e 100644 --- a/crates/net/rpc/src/lib.rs +++ b/crates/net/rpc/src/lib.rs @@ -118,7 +118,7 @@ mod tests { async fn test_get_latest_justified_checkpoint() { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = Store::from_anchor_state(backend, state); let app = build_api_router(store.clone()); @@ -154,7 +154,7 @@ mod tests { let state = create_test_state(); let backend = Arc::new(InMemoryBackend::new()); - let store = Store::from_genesis(backend, state); + let store = Store::from_anchor_state(backend, state); // Get the expected state from 
the store let finalized = store.latest_finalized(); diff --git a/crates/storage/src/api/tables.rs b/crates/storage/src/api/tables.rs index 36ed953..deecdad 100644 --- a/crates/storage/src/api/tables.rs +++ b/crates/storage/src/api/tables.rs @@ -1,8 +1,10 @@ /// Tables in the storage layer. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Table { - /// Block storage: H256 -> Block - Blocks, + /// Block header storage: H256 -> BlockHeader + BlockHeaders, + /// Block body storage: H256 -> BlockBody + BlockBodies, /// Block signatures storage: H256 -> BlockSignaturesWithAttestation /// /// Stored separately from blocks because the genesis block has no signatures. @@ -29,8 +31,9 @@ pub enum Table { } /// All table variants. -pub const ALL_TABLES: [Table; 9] = [ - Table::Blocks, +pub const ALL_TABLES: [Table; 10] = [ + Table::BlockHeaders, + Table::BlockBodies, Table::BlockSignatures, Table::States, Table::LatestKnownAttestations, diff --git a/crates/storage/src/backend/rocksdb.rs b/crates/storage/src/backend/rocksdb.rs index b42053d..c25b128 100644 --- a/crates/storage/src/backend/rocksdb.rs +++ b/crates/storage/src/backend/rocksdb.rs @@ -12,7 +12,8 @@ use std::sync::Arc; /// Returns the column family name for a table. 
fn cf_name(table: Table) -> &'static str { match table { - Table::Blocks => "blocks", + Table::BlockHeaders => "block_headers", + Table::BlockBodies => "block_bodies", Table::BlockSignatures => "block_signatures", Table::States => "states", Table::LatestKnownAttestations => "latest_known_attestations", @@ -166,7 +167,10 @@ mod tests { let backend = RocksDBBackend::open(dir.path()).unwrap(); let mut batch = backend.begin_write().unwrap(); batch - .put_batch(Table::Blocks, vec![(b"key1".to_vec(), b"value1".to_vec())]) + .put_batch( + Table::BlockHeaders, + vec![(b"key1".to_vec(), b"value1".to_vec())], + ) .unwrap(); batch.commit().unwrap(); } @@ -175,7 +179,7 @@ mod tests { { let backend = RocksDBBackend::open(dir.path()).unwrap(); let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"key1").unwrap(); + let value = view.get(Table::BlockHeaders, b"key1").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } diff --git a/crates/storage/src/backend/tests.rs b/crates/storage/src/backend/tests.rs index 7401577..f1a96b0 100644 --- a/crates/storage/src/backend/tests.rs +++ b/crates/storage/src/backend/tests.rs @@ -28,7 +28,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_get_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -38,7 +38,7 @@ fn test_put_and_get(backend: &dyn StorageBackend) { // Read data { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_put_get_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_put_get_key").unwrap(); assert_eq!(value, Some(b"value1".to_vec())); } } @@ -49,7 +49,7 @@ fn test_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_delete_key".to_vec(), b"value1".to_vec())], ) .unwrap(); @@ -60,7 +60,7 @@ fn 
test_delete(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_delete_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_delete_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } @@ -68,7 +68,7 @@ fn test_delete(backend: &dyn StorageBackend) { // Verify deleted { let view = backend.begin_read().unwrap(); - let value = view.get(Table::Blocks, b"test_delete_key").unwrap(); + let value = view.get(Table::BlockHeaders, b"test_delete_key").unwrap(); assert_eq!(value, None); } } @@ -109,7 +109,7 @@ fn test_prefix_iterator(backend: &dyn StorageBackend) { fn test_nonexistent_key(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); let value = view - .get(Table::Blocks, b"test_nonexistent_key_12345") + .get(Table::BlockHeaders, b"test_nonexistent_key_12345") .unwrap(); assert_eq!(value, None); } @@ -120,7 +120,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"old".to_vec())], ) .unwrap(); @@ -131,11 +131,11 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { { let mut batch = backend.begin_write().unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_del_put_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_del_put_key".to_vec()]) .unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_del_put_key".to_vec(), b"new".to_vec())], ) .unwrap(); @@ -144,7 +144,7 @@ fn test_delete_then_put(backend: &dyn StorageBackend) { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_del_put_key").unwrap(), + view.get(Table::BlockHeaders, b"test_del_put_key").unwrap(), Some(b"new".to_vec()) ); } @@ -155,18 +155,21 @@ fn test_put_then_delete(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - 
Table::Blocks, + Table::BlockHeaders, vec![(b"test_put_del_key".to_vec(), b"value".to_vec())], ) .unwrap(); batch - .delete_batch(Table::Blocks, vec![b"test_put_del_key".to_vec()]) + .delete_batch(Table::BlockHeaders, vec![b"test_put_del_key".to_vec()]) .unwrap(); batch.commit().unwrap(); } let view = backend.begin_read().unwrap(); - assert_eq!(view.get(Table::Blocks, b"test_put_del_key").unwrap(), None); + assert_eq!( + view.get(Table::BlockHeaders, b"test_put_del_key").unwrap(), + None + ); } fn test_multiple_tables(backend: &dyn StorageBackend) { @@ -175,7 +178,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { let mut batch = backend.begin_write().unwrap(); batch .put_batch( - Table::Blocks, + Table::BlockHeaders, vec![(b"test_multi_key".to_vec(), b"block".to_vec())], ) .unwrap(); @@ -192,7 +195,7 @@ fn test_multiple_tables(backend: &dyn StorageBackend) { { let view = backend.begin_read().unwrap(); assert_eq!( - view.get(Table::Blocks, b"test_multi_key").unwrap(), + view.get(Table::BlockHeaders, b"test_multi_key").unwrap(), Some(b"block".to_vec()) ); assert_eq!( diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index ed55223..b6ef9ca 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -1,5 +1,11 @@ use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; + +/// The tree hash root of an empty block body. +/// +/// Used to detect genesis/anchor blocks that have no attestations, +/// allowing us to skip storing empty bodies and reconstruct them on read. 
+static EMPTY_BODY_ROOT: LazyLock<H256> = LazyLock::new(|| BlockBody::default().tree_hash_root()); use crate::api::{StorageBackend, Table}; use crate::types::{StoredAggregatedPayload, StoredSignature}; @@ -7,7 +13,7 @@ use crate::types::{StoredAggregatedPayload, StoredSignature}; use ethlambda_types::{ attestation::AttestationData, block::{ - AggregatedSignatureProof, Block, BlockBody, BlockSignaturesWithAttestation, + AggregatedSignatureProof, Block, BlockBody, BlockHeader, BlockSignaturesWithAttestation, BlockWithAttestation, SignedBlockWithAttestation, }, primitives::{ @@ -105,46 +111,94 @@ fn decode_live_chain_key(bytes: &[u8]) -> (u64, H256) { (slot, root) } -/// Underlying storage of the node. -/// Similar to the spec's `Store`, but backed by a pluggable storage backend. +/// Fork choice store backed by a pluggable storage backend. +/// +/// The Store maintains all state required for fork choice and block processing: +/// +/// - **Metadata**: time, config, head, safe_target, justified/finalized checkpoints +/// - **Blocks**: headers and bodies stored separately for efficient header-only queries +/// - **States**: beacon states indexed by block root +/// - **Attestations**: latest known and pending ("new") attestations per validator +/// - **Signatures**: gossip signatures and aggregated proofs for signature verification +/// - **LiveChain**: slot index for efficient fork choice traversal (pruned on finalization) +/// +/// # Constructors /// -/// All data is stored in the backend. Metadata fields (time, config, head, etc.) -/// are stored in the Metadata table with their field name as the key. +/// - [`from_anchor_state`](Self::from_anchor_state): Initialize from a checkpoint state (no block body) +/// - [`get_forkchoice_store`](Self::get_forkchoice_store): Initialize from state + block (stores body) #[derive(Clone)] pub struct Store { - /// Storage backend for all store data. backend: Arc<dyn StorageBackend>, } impl Store { - /// Initialize a Store from a genesis state. 
- pub fn from_genesis(backend: Arc<dyn StorageBackend>, mut genesis_state: State) -> Self { - // Ensure the header state root is zero before computing the state root - genesis_state.latest_block_header.state_root = H256::ZERO; - - let genesis_state_root = genesis_state.tree_hash_root(); - let genesis_block = Block { - slot: 0, - proposer_index: 0, - parent_root: H256::ZERO, - state_root: genesis_state_root, - body: BlockBody::default(), - }; - Self::get_forkchoice_store(backend, genesis_state, genesis_block) + /// Initialize a Store from an anchor state only. + /// + /// Uses the state's `latest_block_header` as the anchor block header. + /// No block body is stored since it's not available. + pub fn from_anchor_state(backend: Arc<dyn StorageBackend>, anchor_state: State) -> Self { + Self::init_store(backend, anchor_state, None) } /// Initialize a Store from an anchor state and block. + /// + /// The block must match the state's `latest_block_header`. + /// Named to mirror the spec's `get_forkchoice_store` function. + /// + /// # Panics + /// + /// Panics if the block's header doesn't match the state's `latest_block_header` + /// (comparing all fields except `state_root`, which is computed internally). pub fn get_forkchoice_store( backend: Arc<dyn StorageBackend>, anchor_state: State, anchor_block: Block, ) -> Self { + // Compare headers with state_root zeroed (init_store handles state_root separately) + let mut state_header = anchor_state.latest_block_header.clone(); + let mut block_header = anchor_block.header(); + state_header.state_root = H256::ZERO; + block_header.state_root = H256::ZERO; + + assert_eq!( + state_header, block_header, + "block header doesn't match state's latest_block_header" + ); + + Self::init_store(backend, anchor_state, Some(anchor_block.body)) + } + + /// Internal helper to initialize the store with anchor data. + /// + /// Header is taken from `anchor_state.latest_block_header`. 
+ fn init_store( + backend: Arc<dyn StorageBackend>, + mut anchor_state: State, + anchor_body: Option<BlockBody>, + ) -> Self { + // Save original state_root for validation + let original_state_root = anchor_state.latest_block_header.state_root; + + // Zero out state_root before computing (state contains header, header contains state_root) + anchor_state.latest_block_header.state_root = H256::ZERO; + + // Compute state root with zeroed header let anchor_state_root = anchor_state.tree_hash_root(); - let anchor_block_root = anchor_block.tree_hash_root(); + + // Validate: original must be zero (genesis) or match computed (checkpoint sync) + assert!( + original_state_root == H256::ZERO || original_state_root == anchor_state_root, + "anchor header state_root mismatch: expected {anchor_state_root:?}, got {original_state_root:?}" + ); + + // Populate the correct state_root + anchor_state.latest_block_header.state_root = anchor_state_root; + + let anchor_block_root = anchor_state.latest_block_header.tree_hash_root(); let anchor_checkpoint = Checkpoint { root: anchor_block_root, - slot: anchor_block.slot, + slot: anchor_state.latest_block_header.slot, }; // Insert initial data @@ -170,15 +224,24 @@ impl Store { .put_batch(Table::Metadata, metadata_entries) .expect("put metadata"); - // Block and state - let block_entries = vec![( + // Block header + let header_entries = vec![( anchor_block_root.as_ssz_bytes(), - anchor_block.as_ssz_bytes(), + anchor_state.latest_block_header.as_ssz_bytes(), )]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + // Block body (if provided) + if let Some(body) = anchor_body { + let body_entries = vec![(anchor_block_root.as_ssz_bytes(), body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } + // State let state_entries = vec![( anchor_block_root.as_ssz_bytes(), anchor_state.as_ssz_bytes(), @@ -187,14 +250,14 @@ 
impl Store { .put_batch(Table::States, state_entries) .expect("put state"); - // Non-finalized chain index + // Live chain index let index_entries = vec![( - encode_live_chain_key(anchor_block.slot, &anchor_block_root), - anchor_block.parent_root.as_ssz_bytes(), + encode_live_chain_key(anchor_state.latest_block_header.slot, &anchor_block_root), + anchor_state.latest_block_header.parent_root.as_ssz_bytes(), )]; batch .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); + .expect("put live chain index"); batch.commit().expect("commit"); } @@ -225,44 +288,50 @@ impl Store { // ============ Time ============ + /// Returns the current store time in seconds since genesis. pub fn time(&self) -> u64 { self.get_metadata(KEY_TIME) } + /// Sets the current store time. pub fn set_time(&mut self, time: u64) { self.set_metadata(KEY_TIME, &time); } // ============ Config ============ + /// Returns the chain configuration. pub fn config(&self) -> ChainConfig { self.get_metadata(KEY_CONFIG) } // ============ Head ============ + /// Returns the current head block root. pub fn head(&self) -> H256 { self.get_metadata(KEY_HEAD) } // ============ Safe Target ============ + /// Returns the safe target block root for attestations. pub fn safe_target(&self) -> H256 { self.get_metadata(KEY_SAFE_TARGET) } + /// Sets the safe target block root. pub fn set_safe_target(&mut self, safe_target: H256) { self.set_metadata(KEY_SAFE_TARGET, &safe_target); } - // ============ Latest Justified ============ + // ============ Checkpoints ============ + /// Returns the latest justified checkpoint. pub fn latest_justified(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_JUSTIFIED) } - // ============ Latest Finalized ============ - + /// Returns the latest finalized checkpoint. 
pub fn latest_finalized(&self) -> Checkpoint { self.get_metadata(KEY_LATEST_FINALIZED) } @@ -454,36 +523,12 @@ impl Store { removed_count } - pub fn get_block(&self, root: &H256) -> Option<Block> { + /// Get the block header by root. + pub fn get_block_header(&self, root: &H256) -> Option<BlockHeader> { let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) + view.get(Table::BlockHeaders, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| Block::from_ssz_bytes(&bytes).expect("valid block")) - } - - pub fn contains_block(&self, root: &H256) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::Blocks, &root.as_ssz_bytes()) - .expect("get") - .is_some() - } - - pub fn insert_block(&mut self, root: H256, block: Block) { - let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; - batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); - - let index_entries = vec![( - encode_live_chain_key(block.slot, &root), - block.parent_root.as_ssz_bytes(), - )]; - batch - .put_batch(Table::LiveChain, index_entries) - .expect("put non-finalized chain index"); - - batch.commit().expect("commit"); + .map(|bytes| BlockHeader::from_ssz_bytes(&bytes).expect("valid header")) } // ============ Signed Blocks ============ @@ -513,10 +558,19 @@ impl Store { let mut batch = self.backend.begin_write().expect("write batch"); - let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; + let header = block.header(); + let header_entries = vec![(root.as_ssz_bytes(), header.as_ssz_bytes())]; batch - .put_batch(Table::Blocks, block_entries) - .expect("put block"); + .put_batch(Table::BlockHeaders, header_entries) + .expect("put block header"); + + // Skip storing empty bodies - they can be reconstructed from the header's body_root + if header.body_root != *EMPTY_BODY_ROOT { + let body_entries = vec![(root.as_ssz_bytes(), 
block.body.as_ssz_bytes())]; + batch + .put_batch(Table::BlockBodies, body_entries) + .expect("put block body"); + } let sig_entries = vec![(root.as_ssz_bytes(), signatures.as_ssz_bytes())]; batch @@ -534,18 +588,28 @@ impl Store { batch.commit().expect("commit"); } - /// Get a signed block by combining block and signatures. + /// Get a signed block by combining header, body, and signatures. /// - /// Returns None if either the block or signatures are not found. + /// Returns None if any of the components are not found. /// Note: Genesis block has no entry in BlockSignatures table. pub fn get_signed_block(&self, root: &H256) -> Option<SignedBlockWithAttestation> { let view = self.backend.begin_read().expect("read view"); let key = root.as_ssz_bytes(); - let block_bytes = view.get(Table::Blocks, &key).expect("get")?; + let header_bytes = view.get(Table::BlockHeaders, &key).expect("get")?; let sig_bytes = view.get(Table::BlockSignatures, &key).expect("get")?; - let block = Block::from_ssz_bytes(&block_bytes).expect("valid block"); + let header = BlockHeader::from_ssz_bytes(&header_bytes).expect("valid header"); + + // Use empty body if header indicates empty, otherwise fetch from DB + let body = if header.body_root == *EMPTY_BODY_ROOT { + BlockBody::default() + } else { + let body_bytes = view.get(Table::BlockBodies, &key).expect("get")?; + BlockBody::from_ssz_bytes(&body_bytes).expect("valid body") + }; + + let block = Block::from_header_and_body(header, body); let signatures = BlockSignaturesWithAttestation::from_ssz_bytes(&sig_bytes).expect("valid signatures"); @@ -554,29 +618,23 @@ impl Store { // ============ States ============ - /// Iterate over all (root, state) pairs. - pub fn iter_states(&self) -> impl Iterator<Item = (H256, State)> + '_ { + /// Returns the state for the given block root. 
+ pub fn get_state(&self, root: &H256) -> Option<State> { let view = self.backend.begin_read().expect("read view"); - let entries: Vec<_> = view - .prefix_iterator(Table::States, &[]) - .expect("iterator") - .filter_map(|res| res.ok()) - .map(|(k, v)| { - let root = H256::from_ssz_bytes(&k).expect("valid root"); - let state = State::from_ssz_bytes(&v).expect("valid state"); - (root, state) - }) - .collect(); - entries.into_iter() + view.get(Table::States, &root.as_ssz_bytes()) + .expect("get") + .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) } - pub fn get_state(&self, root: &H256) -> Option<State> { + /// Returns whether a state exists for the given block root. + pub fn has_state(&self, root: &H256) -> bool { let view = self.backend.begin_read().expect("read view"); view.get(Table::States, &root.as_ssz_bytes()) .expect("get") - .map(|bytes| State::from_ssz_bytes(&bytes).expect("valid state")) + .is_some() } + /// Stores a state indexed by block root. pub fn insert_state(&mut self, root: H256, state: State) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(root.as_ssz_bytes(), state.as_ssz_bytes())]; @@ -584,9 +642,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest Known Attestations ============ + // ============ Known Attestations ============ + // + // "Known" attestations are included in fork choice weight calculations. + // They're promoted from "new" attestations at specific intervals. - /// Iterate over all (validator_id, attestation_data) pairs for known attestations. + /// Iterates over all known attestations (validator_id, attestation_data). pub fn iter_known_attestations(&self) -> impl Iterator<Item = (u64, AttestationData)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -602,6 +663,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest known attestation. 
pub fn get_known_attestation(&self, validator_id: &u64) -> Option<AttestationData> { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestKnownAttestations, &validator_id.as_ssz_bytes()) @@ -609,6 +671,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's latest known attestation. pub fn insert_known_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -618,9 +681,12 @@ impl Store { batch.commit().expect("commit"); } - // ============ Latest New Attestations ============ + // ============ New Attestations ============ + // + // "New" attestations are pending attestations not yet included in fork choice. + // They're promoted to "known" via `promote_new_attestations`. - /// Iterate over all (validator_id, attestation_data) pairs for new attestations. + /// Iterates over all new (pending) attestations. pub fn iter_new_attestations(&self) -> impl Iterator<Item = (u64, AttestationData)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -636,6 +702,7 @@ impl Store { entries.into_iter() } + /// Returns a validator's latest new (pending) attestation. pub fn get_new_attestation(&self, validator_id: &u64) -> Option<AttestationData> { let view = self.backend.begin_read().expect("read view"); view.get(Table::LatestNewAttestations, &validator_id.as_ssz_bytes()) @@ -643,6 +710,7 @@ impl Store { .map(|bytes| AttestationData::from_ssz_bytes(&bytes).expect("valid attestation data")) } + /// Stores a validator's new (pending) attestation. 
pub fn insert_new_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; @@ -652,6 +720,7 @@ impl Store { batch.commit().expect("commit"); } + /// Removes a validator's new (pending) attestation. pub fn remove_new_attestation(&mut self, validator_id: &u64) { let mut batch = self.backend.begin_write().expect("write batch"); batch @@ -695,8 +764,11 @@ impl Store { } // ============ Gossip Signatures ============ + // + // Gossip signatures are individual validator signatures received via P2P. + // They're aggregated into proofs for block signature verification. - /// Iterate over all (signature_key, stored_signature) pairs. + /// Iterates over all gossip signatures. pub fn iter_gossip_signatures( &self, ) -> impl Iterator<Item = (SignatureKey, StoredSignature)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -715,20 +787,7 @@ impl Store { entries.into_iter() } - pub fn get_gossip_signature(&self, key: &SignatureKey) -> Option<StoredSignature> { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .and_then(|bytes| StoredSignature::from_ssz_bytes(&bytes).ok()) - } - - pub fn contains_gossip_signature(&self, key: &SignatureKey) -> bool { - let view = self.backend.begin_read().expect("read view"); - view.get(Table::GossipSignatures, &encode_signature_key(key)) - .expect("get") - .is_some() - } - + /// Stores a gossip signature for later aggregation. pub fn insert_gossip_signature( &mut self, attestation_data: &AttestationData, @@ -749,8 +808,11 @@ impl Store { } // ============ Aggregated Payloads ============ + // + // Aggregated payloads are leanVM proofs combining multiple signatures. + // Used to verify block signatures efficiently. - /// Iterate over all (signature_key, stored_payloads) pairs. + /// Iterates over all aggregated signature payloads. 
pub fn iter_aggregated_payloads( &self, ) -> impl Iterator<Item = (SignatureKey, Vec<StoredAggregatedPayload>)> + '_ { let view = self.backend.begin_read().expect("read view"); let entries: Vec<_> = view @@ -769,10 +831,8 @@ impl Store { entries.into_iter() } - pub fn get_aggregated_payloads( - &self, - key: &SignatureKey, - ) -> Option<Vec<StoredAggregatedPayload>> { + /// Returns aggregated payloads for a signature key. + fn get_aggregated_payloads(&self, key: &SignatureKey) -> Option<Vec<StoredAggregatedPayload>> { let view = self.backend.begin_read().expect("read view"); view.get(Table::AggregatedPayloads, &encode_signature_key(key)) .expect("get") @@ -821,7 +881,7 @@ impl Store { /// Returns the slot of the current safe target block. pub fn safe_target_slot(&self) -> u64 { - self.get_block(&self.safe_target()) + self.get_block_header(&self.safe_target()) .expect("safe target exists") .slot }