4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -18,6 +18,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
By using one or the other, you are losing the ability to rollback or replay transactions earlier than `HEAD-n`.
When using _classic pruning_, you aren't able to fetch blocks prior to `HEAD-n`.

### Changes

- Store pending blocks separately from executed blocks key. [#3073](https://github.com/evstack/ev-node/pull/3073)
Contributor

medium

This changelog entry appears to be from a different pull request. This PR's changes are about allowing pruning when the DA layer is disabled. Please update the changelog to reflect the correct changes.

Suggested change
- Store pending blocks separately from executed blocks key. [#3073](https://github.com/evstack/ev-node/pull/3073)
- Allow pruning when the Data Availability (DA) layer is disabled.

Member Author

nope, the changelog is unrelated to this PR (which does not require any)


## v1.0.0-rc.4

### Changes
4 changes: 2 additions & 2 deletions block/components.go
@@ -182,7 +182,7 @@ func NewSyncComponents(
if p, ok := exec.(coreexecutor.ExecPruner); ok {
execPruner = p
}
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration)
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)

// Create submitter for sync nodes (no signer, only DA inclusion processing)
var daSubmitter submitting.DASubmitterAPI = submitting.NewDASubmitter(daClient, config, genesis, blockOpts, metrics, logger, headerDAHintAppender, dataDAHintAppender)
@@ -271,7 +271,7 @@ func NewAggregatorComponents(
if p, ok := exec.(coreexecutor.ExecPruner); ok {
execPruner = p
}
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration)
pruner := pruner.New(logger, store, execPruner, config.Pruning, config.Node.BlockTime.Duration, config.DA.Address)

reaper, err := reaping.NewReaper(
exec,
33 changes: 21 additions & 12 deletions block/internal/pruner/pruner.go
@@ -23,6 +23,7 @@ type Pruner struct {
execPruner coreexecutor.ExecPruner
cfg config.PruningConfig
blockTime time.Duration
daEnabled bool
logger zerolog.Logger

// Lifecycle
@@ -38,12 +39,14 @@ func New(
execPruner coreexecutor.ExecPruner,
cfg config.PruningConfig,
blockTime time.Duration,
daAddress string,
) *Pruner {
return &Pruner{
store: store,
execPruner: execPruner,
cfg: cfg,
blockTime: blockTime,
daEnabled: daAddress != "", // DA is enabled if address is provided
logger: logger.With().Str("component", "pruner").Logger(),
}
}
@@ -105,22 +108,28 @@ func (p *Pruner) pruneLoop() {

// pruneBlocks prunes blocks and their metadatas.
func (p *Pruner) pruneBlocks() error {
var currentDAIncluded uint64
currentDAIncludedBz, err := p.store.GetMetadata(p.ctx, store.DAIncludedHeightKey)
if err == nil && len(currentDAIncludedBz) == 8 {
currentDAIncluded = binary.LittleEndian.Uint64(currentDAIncludedBz)
} else {
// if we cannot get the current DA height, we cannot safely prune, so we skip pruning until we can get it.
return nil
}

storeHeight, err := p.store.Height(p.ctx)
if err != nil {
return fmt.Errorf("failed to get store height for pruning: %w", err)
}

// Never prune blocks that are not DA included
upperBound := min(storeHeight, currentDAIncluded)
upperBound := storeHeight

// If DA is enabled, only prune blocks that are DA included
if p.daEnabled {
var currentDAIncluded uint64
currentDAIncludedBz, err := p.store.GetMetadata(p.ctx, store.DAIncludedHeightKey)
if err == nil && len(currentDAIncludedBz) == 8 {
currentDAIncluded = binary.LittleEndian.Uint64(currentDAIncludedBz)
} else {
p.logger.Debug().Msg("skipping pruning: DA is enabled but DA included height is not available yet")
return nil
}

// Never prune blocks that are not DA included
upperBound = min(storeHeight, currentDAIncluded)
}

if upperBound <= p.cfg.KeepRecent {
// Not enough fully included blocks to prune
return nil
@@ -149,7 +158,7 @@ func (p *Pruner) pruneBlocks() error {
}
}

p.logger.Debug().Uint64("pruned_up_to_height", batchEnd).Msg("pruned blocks up to height")
p.logger.Debug().Uint64("pruned_up_to_height", batchEnd).Bool("da_enabled", p.daEnabled).Msg("pruned blocks up to height")
return nil
}

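To make the behavioral change above easier to review at a glance, here is a standalone sketch that condenses the new upper-bound selection in `pruneBlocks`. The function and parameter names are illustrative only, not part of the package's API, and `min` relies on the Go 1.21+ builtin used in the diff.

```go
package main

import "fmt"

// pruneUpperBound condenses the decision made in pruneBlocks: without DA,
// prune up to the store height; with DA, never go past the DA-included
// height, and skip entirely while that height is still unknown.
func pruneUpperBound(storeHeight, daIncluded uint64, daEnabled, daHeightKnown bool) (uint64, bool) {
	if !daEnabled {
		return storeHeight, true
	}
	if !daHeightKnown {
		return 0, false // skip pruning until the DA-included height is available
	}
	return min(storeHeight, daIncluded), true
}

func main() {
	fmt.Println(pruneUpperBound(100, 0, false, false)) // 100 true  (DA disabled: bound = store height)
	fmt.Println(pruneUpperBound(100, 0, true, false))  // 0 false   (DA enabled, DA height unknown: skip)
	fmt.Println(pruneUpperBound(100, 60, true, true))  // 60 true   (DA enabled: capped at DA-included height)
}
```

The second case mirrors the early `return nil` in the diff: with DA enabled but no DA-included height recorded yet, pruning is skipped rather than risking removal of blocks that are not yet DA-included.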
102 changes: 101 additions & 1 deletion block/internal/pruner/pruner_test.go
@@ -51,7 +51,7 @@ func TestPrunerPruneMetadata(t *testing.T) {
KeepRecent: 1,
}

pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond)
pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond, "") // Empty DA address
require.NoError(t, pruner.pruneMetadata())

_, err := stateStore.GetStateAtHeight(ctx, 1)
@@ -63,3 +63,103 @@ func TestPrunerPruneMetadata(t *testing.T) {
_, exists := execAdapter.existing[1]
require.False(t, exists)
}

func TestPrunerPruneBlocksWithoutDA(t *testing.T) {
t.Parallel()

ctx := context.Background()
kv := dssync.MutexWrap(ds.NewMapDatastore())
stateStore := store.New(kv)

// Create blocks without setting DAIncludedHeightKey (simulating node without DA)
for height := uint64(1); height <= 100; height++ {
header := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: height}}}
data := &types.Data{}
sig := types.Signature([]byte{byte(height)})

batch, err := stateStore.NewBatch(ctx)
require.NoError(t, err)
require.NoError(t, batch.SaveBlockData(header, data, &sig))
require.NoError(t, batch.SetHeight(height))
require.NoError(t, batch.UpdateState(types.State{LastBlockHeight: height}))
require.NoError(t, batch.Commit())
}

execAdapter := &execMetaAdapter{existing: make(map[uint64]struct{})}
for h := uint64(1); h <= 100; h++ {
execAdapter.existing[h] = struct{}{}
}

// Test with empty DA address (DA disabled) - should prune successfully
cfg := config.PruningConfig{
Mode: config.PruningModeAll,
Interval: config.DurationWrapper{Duration: 1 * time.Second},
KeepRecent: 10,
}

pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, execAdapter, cfg, 100*time.Millisecond, "") // Empty DA address = DA disabled
require.NoError(t, pruner.pruneBlocks())

// Verify blocks were pruned (batch size is 40 blocks: 1s interval / 100ms block time * 4)
// So we expect to prune from height 1 up to min(0 + 40, 90) = 40
height, err := stateStore.Height(ctx)
require.NoError(t, err)
require.Equal(t, uint64(100), height)

// Verify old blocks were pruned (up to height 40)
for h := uint64(1); h <= 40; h++ {
_, _, err := stateStore.GetBlockData(ctx, h)
require.Error(t, err, "expected block data at height %d to be pruned", h)
}

// Verify blocks after batch were kept
for h := uint64(41); h <= 100; h++ {
_, _, err := stateStore.GetBlockData(ctx, h)
require.NoError(t, err, "expected block data at height %d to be kept", h)
}

// Verify exec metadata was also pruned (strictly less than 40)
for h := uint64(1); h < 40; h++ {
_, exists := execAdapter.existing[h]
require.False(t, exists, "expected exec metadata at height %d to be pruned", h)
}
}

func TestPrunerPruneBlocksWithDAEnabled(t *testing.T) {
t.Parallel()

ctx := context.Background()
kv := dssync.MutexWrap(ds.NewMapDatastore())
stateStore := store.New(kv)

// Create blocks without setting DAIncludedHeightKey
for height := uint64(1); height <= 100; height++ {
header := &types.SignedHeader{Header: types.Header{BaseHeader: types.BaseHeader{Height: height}}}
data := &types.Data{}
sig := types.Signature([]byte{byte(height)})

batch, err := stateStore.NewBatch(ctx)
require.NoError(t, err)
require.NoError(t, batch.SaveBlockData(header, data, &sig))
require.NoError(t, batch.SetHeight(height))
require.NoError(t, batch.UpdateState(types.State{LastBlockHeight: height}))
require.NoError(t, batch.Commit())
}

// Test with DA address provided (DA enabled) - should skip pruning when DA height is not available
cfg := config.PruningConfig{
Mode: config.PruningModeAll,
Interval: config.DurationWrapper{Duration: 1 * time.Second},
KeepRecent: 10,
}

pruner := New(zerolog.New(zerolog.NewTestWriter(t)), stateStore, nil, cfg, 100*time.Millisecond, "localhost:1234") // DA enabled
// Should return nil (skip pruning) since DA height is not available
require.NoError(t, pruner.pruneBlocks())

// Verify no blocks were pruned (all blocks should still be retrievable)
for h := uint64(1); h <= 100; h++ {
_, _, err := stateStore.GetBlockData(ctx, h)
require.NoError(t, err, "expected block data at height %d to still exist (no pruning should have happened)", h)
}
}
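For reviewers checking the numbers in `TestPrunerPruneBlocksWithoutDA`, the expected batch size can be re-derived from the test's own comment ("1s interval / 100ms block time * 4"). Note the ×4 multiplier is quoted from that comment, not from the pruner's batching code, which is not part of this diff:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Arithmetic behind the "40 blocks per batch" expectation in the test:
	// (pruning interval / block time) * 4. The *4 multiplier comes from the
	// test comment itself; the batching implementation is not shown here.
	interval := 1 * time.Second
	blockTime := 100 * time.Millisecond
	batch := uint64(interval/blockTime) * 4
	fmt.Println(batch) // 40
}
```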
3 changes: 2 additions & 1 deletion docs/learn/config.md
@@ -340,7 +340,8 @@ _Constants:_ `FlagPruningMode`, `FlagPruningKeepRecent`, `FlagPruningInterval`

**Important Notes:**

- Pruning only removes blocks that have been confirmed on the DA layer (for mode `all`)
- When DA is enabled (DA address is configured), pruning only removes blocks that have been confirmed on the DA layer (for mode `all`) to ensure data safety
- When DA is not enabled (no DA address configured), pruning proceeds based solely on store height, allowing nodes without DA to manage disk space
- The first pruning run after enabling may take several cycles to catch up, processing data in smaller batches
- Pruning cannot be undone - ensure your retention window is sufficient for your use case
- For production deployments, consider keeping at least 100,000 recent blocks
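On the docs side, the "at least 100,000 recent blocks" guidance translates into a concrete retention window once a block time is assumed. A rough sketch, using a 1-second block time purely for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Retention window implied by KeepRecent = 100,000 at an assumed 1s
	// block time (substitute your chain's configured block time).
	keepRecent := uint64(100_000)
	blockTime := 1 * time.Second
	retention := time.Duration(keepRecent) * blockTime
	fmt.Println(retention) // 27h46m40s
}
```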