diff --git a/CHANGELOG.md b/CHANGELOG.md
index b2e1ff534..98961e622 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 By using one or the other, you are losing the ability to rollback or replay transactions earlier than `HEAD-n`.
 When using _classic pruning_, you aren't able to fetch blocks prior to `HEAD-n`.
 
+### Changes
+
+- Store pending blocks separately from executed blocks key. [#3073](https://github.com/evstack/ev-node/pull/3073)
+
 ## v1.0.0-rc.4
 
 ### Changes
diff --git a/block/components.go b/block/components.go
index 40071b6f2..05852584a 100644
--- a/block/components.go
+++ b/block/components.go
@@ -182,7 +182,7 @@ func NewSyncComponents(
 	if p, ok := exec.(coreexecutor.ExecPruner); ok {
 		execPruner = p
 	}
-	pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration)
+	pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)
 
 	// Create submitter for sync nodes (no signer, only DA inclusion processing)
 	var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerDAHintAppender, dataDAHintAppender)
@@ -271,7 +271,7 @@ func NewAggregatorComponents(
 	if p, ok := exec.(coreexecutor.ExecPruner); ok {
 		execPruner = p
 	}
-	pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration)
+	pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)
 
 	reaper, err := reaping.NewReaper(
 		exec,
diff --git a/block/internal/pruner/pruner.go b/block/internal/pruner/pruner.go
index 63d291a61..0c98cd1c8 100644
--- a/block/internal/pruner/pruner.go
+++ b/block/internal/pruner/pruner.go
@@ -23,6 +23,7 @@ type Pruner struct {
 	execPruner coreexecutor.ExecPruner
 	cfg        config.PruningConfig
 	blockTime  time.Duration
+	daEnabled  bool
 	logger     zerolog.Logger
 
 	// Lifecycle
@@ -38,12 +39,14 @@ func New(
 	execPruner coreexecutor.ExecPruner,
 	cfg config.PruningConfig,
 	blockTime time.Duration,
+	daAddress string,
 ) *Pruner {
 	return &Pruner{
 		store:      store,
 		execPruner: execPruner,
 		cfg:        cfg,
 		blockTime:  blockTime,
+		daEnabled:  daAddress != "", // DA is enabled if address is provided
 		logger:     logger.With().Str("component", "pruner").Logger(),
 	}
 }
@@ -105,22 +108,28 @@ func (p *Pruner) pruneLoop() {
 
 // pruneBlocks prunes blocks and their metadatas.
 func (p *Pruner) pruneBlocks() error {
-	var currentDAIncluded uint64
-	currentDAIncludedBz, err := p.store.GetMetadata(p.ctx, store.DAIncludedHeightKey)
-	if err == nil && len(currentDAIncludedBz) == 8 {
-		currentDAIncluded = binary.LittleEndian.Uint64(currentDAIncludedBz)
-	} else {
-		// if we cannot get the current DA height, we cannot safely prune, so we skip pruning until we can get it.
-		return nil
-	}
-
 	storeHeight, err := p.store.Height(p.ctx)
 	if err != nil {
 		return fmt.Errorf("failed to get store height for pruning: %w", err)
 	}
 
-	// Never prune blocks that are not DA included
-	upperBound := min(storeHeight, currentDAIncluded)
+	upperBound := storeHeight
+
+	// If DA is enabled, only prune blocks that are DA included
+	if p.daEnabled {
+		var currentDAIncluded uint64
+		currentDAIncludedBz, err := p.store.GetMetadata(p.ctx, store.DAIncludedHeightKey)
+		if err == nil && len(currentDAIncludedBz) == 8 {
+			currentDAIncluded = binary.LittleEndian.Uint64(currentDAIncludedBz)
+		} else {
+			p.logger.Debug().Msg("skipping pruning: DA is enabled but DA included height is not available yet")
+			return nil
+		}
+
+		// Never prune blocks that are not DA included
+		upperBound = min(storeHeight, currentDAIncluded)
+	}
+
 	if upperBound <= p.cfg.KeepRecent {
 		// Not enough fully included blocks to prune
 		return nil
@@ -149,7 +158,7 @@ func (p *Pruner) pruneBlocks() error {
 		}
 	}
 
-	p.logger.Debug().Uint64("pruned_up_to_height", batchEnd).Msg("pruned blocks up to height")
+	p.logger.Debug().Uint64("pruned_up_to_height", batchEnd).Bool("da_enabled", p.daEnabled).Msg("pruned blocks up to height")
 
 	return nil
 }
diff --git a/block/internal/pruner/pruner_test.go b/block/internal/pruner/pruner_test.go
index b57cdff9a..8f0ad4773 100644
--- a/block/internal/pruner/pruner_test.go
+++ b/block/internal/pruner/pruner_test.go
@@ -32,7 +32,7 @@ func (e *execMetaAdapter) PruneExec(ctx context.Context, height uint64) error {
 func TestPrunerPruneMetadata(t *testing.T) {
 	t.Parallel()
 
-	ctx := context.Background()
+	ctx := t.Context()
 	kv := dssync.MutexWrap(ds.NewMapDatastore())
 	stateStore := store.New(kv)
 
@@ -51,7 +51,7 @@ func TestPrunerPruneMetadata(t *testing.T) {
 		KeepRecent: 1,
 	}
 
-	pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond)
+	pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond, "") // Empty DA address
 
 	require.NoError(t, pruner.pruneMetadata())
 	_, err := stateStore.GetStateAtHeight(ctx, 1)
@@ -63,3 +63,103 @@ func TestPrunerPruneMetadata(t *testing.T) {
 	_, exists := execAdapter.existing[1]
 	require.False(t, exists)
 }
+
+func TestPrunerPruneBlocksWithoutDA(t *testing.T) {
+	t.Parallel()
+
+	ctx := t.Context()
+	kv := dssync.MutexWrap(ds.NewMapDatastore())
+	stateStore := store.New(kv)
+
+	// Create blocks without setting DAIncludedHeightKey (simulating node without DA)
+	for height := uint64(1); height <= 100; height++ {
+		header := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: height}}}
+		data := &types.Data{}
+		sig := types.Signature([]byte{byte(height)})
+
+		batch, err := stateStore.NewBatch(ctx)
+		require.NoError(t, err)
+		require.NoError(t, batch.SaveBlockData(header, data, &sig))
+		require.NoError(t, batch.SetHeight(height))
+		require.NoError(t, batch.UpdateState(types.State{LastBlockHeight: height}))
+		require.NoError(t, batch.Commit())
+	}
+
+	execAdapter := &execMetaAdapter{existing: make(map[uint64]struct{})}
+	for h := uint64(1); h <= 100; h++ {
+		execAdapter.existing[h] = struct{}{}
+	}
+
+	// Test with empty DA address (DA disabled) - should prune successfully
+	cfg := config.PruningConfig{
+		Mode:       config.PruningModeAll,
+		Interval:   config.DurationWrapper{Duration: 1 * time.Second},
+		KeepRecent: 10,
+	}
+
+	pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond, "") // Empty DA address = DA disabled
+	require.NoError(t, pruner.pruneBlocks())
+
+	// Verify blocks were pruned (batch size is 40 blocks: 1s interval / 100ms block time * 4)
+	// So we expect to prune from height 1 up to min(0 + 40, 90) = 40
+	height, err := stateStore.Height(ctx)
+	require.NoError(t, err)
+	require.Equal(t, uint64(100), height)
+
+	// Verify old blocks were pruned (up to height 40)
+	for h := uint64(1); h <= 40; h++ {
+		_, _, err := stateStore.GetBlockData(ctx, h)
+		require.Error(t, err, "expected block data at height %d to be pruned", h)
+	}
+
+	// Verify blocks after batch were kept
+	for h := uint64(41); h <= 100; h++ {
+		_, _, err := stateStore.GetBlockData(ctx, h)
+		require.NoError(t, err, "expected block data at height %d to be kept", h)
+	}
+
+	// Verify exec metadata was also pruned (strictly less than 40)
+	for h := uint64(1); h < 40; h++ {
+		_, exists := execAdapter.existing[h]
+		require.False(t, exists, "expected exec metadata at height %d to be pruned", h)
+	}
+}
+
+func TestPrunerPruneBlocksWithDAEnabled(t *testing.T) {
+	t.Parallel()
+
+	ctx := t.Context()
+	kv := dssync.MutexWrap(ds.NewMapDatastore())
+	stateStore := store.New(kv)
+
+	// Create blocks without setting DAIncludedHeightKey
+	for height := uint64(1); height <= 100; height++ {
+		header := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: height}}}
+		data := &types.Data{}
+		sig := types.Signature([]byte{byte(height)})
+
+		batch, err := stateStore.NewBatch(ctx)
+		require.NoError(t, err)
+		require.NoError(t, batch.SaveBlockData(header, data, &sig))
+		require.NoError(t, batch.SetHeight(height))
+		require.NoError(t, batch.UpdateState(types.State{LastBlockHeight: height}))
+		require.NoError(t, batch.Commit())
+	}
+
+	// Test with DA address provided (DA enabled) - should skip pruning when DA height is not available
+	cfg := config.PruningConfig{
+		Mode:       config.PruningModeAll,
+		Interval:   config.DurationWrapper{Duration: 1 * time.Second},
+		KeepRecent: 10,
+	}
+
+	pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, nil, cfg, 100*time.Millisecond, "localhost:1234") // DA enabled
+	// Should return nil (skip pruning) since DA height is not available
+	require.NoError(t, pruner.pruneBlocks())
+
+	// Verify no blocks were pruned (all blocks should still be retrievable)
+	for h := uint64(1); h <= 100; h++ {
+		_, _, err := stateStore.GetBlockData(ctx, h)
+		require.NoError(t, err, "expected block data at height %d to still exist (no pruning should have happened)", h)
+	}
+}
diff --git a/docs/learn/config.md b/docs/learn/config.md
index 1cd2dcef0..fed11350e 100644
--- a/docs/learn/config.md
+++ b/docs/learn/config.md
@@ -340,7 +340,8 @@ _Constants:_ `FlagPruningMode`, `FlagPruningKeepRecent`, `FlagPruningInterval`
 
 **Important Notes:**
 
-- Pruning only removes blocks that have been confirmed on the DA layer (for mode `all`)
+- When DA is enabled (DA address is configured), pruning only removes blocks that have been confirmed on the DA layer (for mode `all`) to ensure data safety
+- When DA is not enabled (no DA address configured), pruning proceeds based solely on store height, allowing nodes without DA to manage disk space
 - The first pruning run after enabling may take several cycles to catch up, processing data in smaller batches
 - Pruning cannot be undone - ensure your retention window is sufficient for your use case
 - For production deployments, consider keeping at least 100,000 recent blocks
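
For reviewers, a minimal self-contained sketch of the upper-bound rule this patch introduces in `pruneBlocks`. The `pruneUpperBound` helper, its plain-value parameters, and the `main` driver are illustrative only (they are not part of the patch, which also applies batching on top of this bound); the numbers mirror the test setup above (store height 100, `KeepRecent` 10, DA-included height 50).

```go
package main

import "fmt"

// pruneUpperBound sketches the gating logic added to pruneBlocks: with DA disabled
// the store height alone bounds pruning; with DA enabled, pruning is capped at the
// DA-included height and skipped entirely while that height is still unknown.
// It returns the highest prunable height (before batching) and whether pruning runs.
func pruneUpperBound(daEnabled bool, storeHeight, daIncluded uint64, daKnown bool, keepRecent uint64) (uint64, bool) {
	upperBound := storeHeight
	if daEnabled {
		if !daKnown {
			return 0, false // skip pruning until the DA included height is available
		}
		upperBound = min(storeHeight, daIncluded) // never prune blocks that are not DA included
	}
	if upperBound <= keepRecent {
		return 0, false // not enough blocks beyond the retention window
	}
	return upperBound - keepRecent, true
}

func main() {
	fmt.Println(pruneUpperBound(false, 100, 0, false, 10)) // DA disabled: 90 true
	fmt.Println(pruneUpperBound(true, 100, 0, false, 10))  // DA enabled, height unknown: 0 false
	fmt.Println(pruneUpperBound(true, 100, 50, true, 10))  // DA enabled, included up to 50: 40 true
}
```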