From 0ece7006132d89ce630bab537bd41ddd35124a1b Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Wed, 25 Feb 2026 17:27:29 -0800 Subject: [PATCH 01/18] [Access] Add scheduled transactions extended index and endpoints --- access/backends/extended/api.go | 41 + access/backends/extended/backend.go | 14 +- access/backends/extended/backend_base.go | 20 + .../backend_scheduled_transactions.go | 372 +++++ .../backend_scheduled_transactions_test.go | 1242 +++++++++++++++++ access/backends/extended/mock/api.go | 270 ++++ .../node_builder/access_node_builder.go | 10 + cmd/observer/node_builder/observer_builder.go | 24 + .../rest/experimental/models/contract.go | 9 + .../experimental/models/model_contract.go | 16 + .../models/model_scheduled_transaction.go | 43 + ...model_scheduled_transaction__expandable.go | 19 + .../model_scheduled_transaction_priority.go | 19 + .../model_scheduled_transaction_status.go | 20 + .../model_scheduled_transactions_response.go | 14 + .../models/scheduled_transaction.go | 105 ++ .../request/cursor_scheduled_transactions.go | 38 + .../request/get_scheduled_transactions.go | 172 +++ .../routes/scheduled_transactions.go | 108 ++ .../routes/scheduled_transactions_test.go | 671 +++++++++ .../access/rest/router/routes_experimental.go | 15 + .../extended_indexing_scheduled_txs_test.go | 253 ++++ .../access/cohort3/extended_indexing_test.go | 10 +- model/access/contract.go | 7 + model/access/scheduled_transaction.go | 136 ++ .../indexer/extended/bootstrap.go | 10 + .../indexer/extended/events/helpers.go | 15 + .../indexer/extended/events/helpers_test.go | 29 + .../extended/events/scheduled_transaction.go | 187 +++ .../indexer/extended/mock/script_executor.go | 118 ++ .../extended/scheduled_transaction_data.go | 141 ++ .../scheduled_transaction_data_test.go | 253 ++++ .../scheduled_transaction_requester.go | 139 ++ .../scheduled_transaction_requester_test.go | 206 +++ .../extended/scheduled_transactions.go | 
398 ++++++ .../extended/scheduled_transactions_test.go | 1039 ++++++++++++++ .../indexer/extended/test_helpers_test.go | 81 ++ storage/account_transactions.go | 2 +- storage/account_transfers.go | 4 +- storage/errors.go | 4 + storage/indexes/prefix.go | 6 + storage/indexes/scheduled_transactions.go | 380 +++++ .../scheduled_transactions_bootstrapper.go | 234 ++++ ...cheduled_transactions_bootstrapper_test.go | 287 ++++ .../indexes/scheduled_transactions_test.go | 390 ++++++ storage/locks.go | 5 + storage/mock/scheduled_transactions_index.go | 606 ++++++++ ...heduled_transactions_index_bootstrapper.go | 677 +++++++++ ...heduled_transactions_index_range_reader.go | 124 ++ .../scheduled_transactions_index_reader.go | 229 +++ .../scheduled_transactions_index_writer.go | 328 +++++ storage/scheduled_transactions_index.go | 157 +++ 52 files changed, 9687 insertions(+), 10 deletions(-) create mode 100644 access/backends/extended/backend_scheduled_transactions.go create mode 100644 access/backends/extended/backend_scheduled_transactions_test.go create mode 100644 engine/access/rest/experimental/models/contract.go create mode 100644 engine/access/rest/experimental/models/model_contract.go create mode 100644 engine/access/rest/experimental/models/model_scheduled_transaction.go create mode 100644 engine/access/rest/experimental/models/model_scheduled_transaction__expandable.go create mode 100644 engine/access/rest/experimental/models/model_scheduled_transaction_priority.go create mode 100644 engine/access/rest/experimental/models/model_scheduled_transaction_status.go create mode 100644 engine/access/rest/experimental/models/model_scheduled_transactions_response.go create mode 100644 engine/access/rest/experimental/models/scheduled_transaction.go create mode 100644 engine/access/rest/experimental/request/cursor_scheduled_transactions.go create mode 100644 engine/access/rest/experimental/request/get_scheduled_transactions.go create mode 100644 
engine/access/rest/experimental/routes/scheduled_transactions.go create mode 100644 engine/access/rest/experimental/routes/scheduled_transactions_test.go create mode 100644 integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go create mode 100644 model/access/contract.go create mode 100644 model/access/scheduled_transaction.go create mode 100644 module/state_synchronization/indexer/extended/events/scheduled_transaction.go create mode 100644 module/state_synchronization/indexer/extended/mock/script_executor.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transaction_data.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transaction_requester.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transactions.go create mode 100644 module/state_synchronization/indexer/extended/scheduled_transactions_test.go create mode 100644 module/state_synchronization/indexer/extended/test_helpers_test.go create mode 100644 storage/indexes/scheduled_transactions.go create mode 100644 storage/indexes/scheduled_transactions_bootstrapper.go create mode 100644 storage/indexes/scheduled_transactions_bootstrapper_test.go create mode 100644 storage/indexes/scheduled_transactions_test.go create mode 100644 storage/mock/scheduled_transactions_index.go create mode 100644 storage/mock/scheduled_transactions_index_bootstrapper.go create mode 100644 storage/mock/scheduled_transactions_index_range_reader.go create mode 100644 storage/mock/scheduled_transactions_index_reader.go create mode 100644 storage/mock/scheduled_transactions_index_writer.go create mode 100644 storage/scheduled_transactions_index.go diff --git a/access/backends/extended/api.go b/access/backends/extended/api.go index 
0574f6b4a01..4dee88fba3a 100644 --- a/access/backends/extended/api.go +++ b/access/backends/extended/api.go @@ -70,4 +70,45 @@ type API interface { expandOptions AccountTransferExpandOptions, encodingVersion entities.EventEncodingVersion, ) (*accessmodel.NonFungibleTokenTransfersPage, error) + + // GetScheduledTransaction returns a single scheduled transaction by its scheduler-assigned ID. + // + // Expected error returns during normal operations: + // - [codes.NotFound]: if no transaction with the given ID exists + // - [codes.FailedPrecondition]: if the index has not been initialized + GetScheduledTransaction( + ctx context.Context, + id uint64, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.ScheduledTransaction, error) + + // GetScheduledTransactions returns a paginated list of scheduled transactions. + // + // Expected error returns during normal operations: + // - [codes.FailedPrecondition]: if the index has not been initialized + // - [codes.InvalidArgument]: if the query parameters are invalid + GetScheduledTransactions( + ctx context.Context, + limit uint32, + cursor *accessmodel.ScheduledTransactionCursor, + filter ScheduledTransactionFilter, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.ScheduledTransactionsPage, error) + + // GetScheduledTransactionsByAddress returns a paginated list of scheduled transactions for the given address. 
+ // + // Expected error returns during normal operations: + // - [codes.FailedPrecondition]: if the index has not been initialized + // - [codes.InvalidArgument]: if the query parameters are invalid + GetScheduledTransactionsByAddress( + ctx context.Context, + address flow.Address, + limit uint32, + cursor *accessmodel.ScheduledTransactionCursor, + filter ScheduledTransactionFilter, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.ScheduledTransactionsPage, error) } diff --git a/access/backends/extended/backend.go b/access/backends/extended/backend.go index 73ea0a51a55..0a1b539176b 100644 --- a/access/backends/extended/backend.go +++ b/access/backends/extended/backend.go @@ -15,6 +15,7 @@ import ( txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" "github.com/onflow/flow-go/model/access/systemcollection" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" @@ -38,6 +39,7 @@ func DefaultConfig() Config { type Backend struct { *AccountTransactionsBackend *AccountTransfersBackend + *ScheduledTransactionsBackend log zerolog.Logger } @@ -61,7 +63,9 @@ func New( collections storage.CollectionsReader, transactions storage.TransactionsReader, scheduledTransactions storage.ScheduledTransactionsReader, + scheduledTxIndex storage.ScheduledTransactionsIndexReader, txStatusDeriver *txstatus.TxStatusDeriver, + scriptExecutor execution.ScriptExecutor, ) (*Backend, error) { log = log.With().Str("component", "extended_backend").Logger() @@ -94,9 +98,10 @@ func New( chain := chainID.Chain() return &Backend{ - log: log, - AccountTransactionsBackend: NewAccountTransactionsBackend(log, base, store, chain), - AccountTransfersBackend: NewAccountTransfersBackend(log, base, ftStore, nftStore, chain), + log: log, + 
AccountTransactionsBackend: NewAccountTransactionsBackend(log, base, store, chain), + AccountTransfersBackend: NewAccountTransfersBackend(log, base, ftStore, nftStore, chain), + ScheduledTransactionsBackend: NewScheduledTransactionsBackend(log, base, scheduledTxIndex, scheduledTransactions, state, scriptExecutor), }, nil } @@ -112,8 +117,7 @@ func mapReadError(ctx context.Context, label string, err error) error { case errors.Is(err, storage.ErrNotFound): return status.Errorf(codes.NotFound, "not found: %v", err) default: - err = fmt.Errorf("failed to get %s: %w", label, err) - irrecoverable.Throw(ctx, err) + irrecoverable.Throw(ctx, fmt.Errorf("failed to get %s: %w", label, err)) return err } } diff --git a/access/backends/extended/backend_base.go b/access/backends/extended/backend_base.go index 58032fadf3f..c4ae5519fff 100644 --- a/access/backends/extended/backend_base.go +++ b/access/backends/extended/backend_base.go @@ -6,11 +6,14 @@ import ( "fmt" "github.com/onflow/flow/protobuf/go/flow/entities" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/access/systemcollection" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) @@ -28,6 +31,23 @@ type backendBase struct { systemCollections *systemcollection.Versioned } +// mapReadError converts storage read errors to appropriate gRPC status errors. 
+func (b *backendBase) mapReadError(ctx context.Context, label string, err error) error { + switch { + case errors.Is(err, storage.ErrNotBootstrapped): + return status.Errorf(codes.FailedPrecondition, "%s index not initialized: %v", label, err) + case errors.Is(err, storage.ErrHeightNotIndexed): + return status.Errorf(codes.OutOfRange, "requested height not indexed: %v", err) + case errors.Is(err, storage.ErrInvalidQuery): + return status.Errorf(codes.InvalidArgument, "invalid query: %v", err) + case errors.Is(err, storage.ErrNotFound): + return status.Errorf(codes.NotFound, "not found: %v", err) + default: + irrecoverable.Throw(ctx, fmt.Errorf("failed to get %s: %w", label, err)) + return err + } +} + // normalizeLimit applies default page size when limit is 0, and returns an error if the limit // exceeds the configured maximum. // diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go new file mode 100644 index 00000000000..a165c43744e --- /dev/null +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -0,0 +1,372 @@ +package extended + +import ( + "context" + "fmt" + "strings" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/indexes/iterator" +) + +type ScheduledTransactionExpandOptions struct { + Result bool + Transaction bool + HandlerContract bool +} + +func (o *ScheduledTransactionExpandOptions) HasExpand() bool { + return o.Result || o.Transaction || o.HandlerContract +} + +// ScheduledTransactionFilter specifies optional filter criteria for scheduled 
transaction queries. +// All fields are optional; nil/zero fields are ignored. +type ScheduledTransactionFilter struct { + Statuses []accessmodel.ScheduledTransactionStatus + Priority *accessmodel.ScheduledTransactionPriority + StartTime *uint64 // inclusive UFix64 timestamp lower bound + EndTime *uint64 // inclusive UFix64 timestamp upper bound + TransactionHandlerOwner *flow.Address + TransactionHandlerTypeID *string + TransactionHandlerUUID *uint64 +} + +func (f *ScheduledTransactionFilter) IsEmpty() bool { + if f == nil { + return true + } + if len(f.Statuses) == 0 && + f.Priority == nil && + f.StartTime == nil && + f.EndTime == nil && + f.TransactionHandlerOwner == nil && + f.TransactionHandlerTypeID == nil && + f.TransactionHandlerUUID == nil { + return true + } + return false +} + +// Filter builds a [storage.IndexFilter] from the non-nil filter fields. +func (f *ScheduledTransactionFilter) Filter() storage.IndexFilter[*accessmodel.ScheduledTransaction] { + if f.IsEmpty() { + return nil + } + + statuses := make(map[accessmodel.ScheduledTransactionStatus]bool) + for _, status := range f.Statuses { + statuses[status] = true + } + + return func(tx *accessmodel.ScheduledTransaction) bool { + if len(statuses) > 0 && !statuses[tx.Status] { + return false + } + + if f.Priority != nil && tx.Priority != *f.Priority { + return false + } + if f.StartTime != nil && tx.Timestamp < *f.StartTime { + return false + } + if f.EndTime != nil && tx.Timestamp > *f.EndTime { + return false + } + if f.TransactionHandlerOwner != nil && tx.TransactionHandlerOwner != *f.TransactionHandlerOwner { + return false + } + if f.TransactionHandlerTypeID != nil && tx.TransactionHandlerTypeIdentifier != *f.TransactionHandlerTypeID { + return false + } + if f.TransactionHandlerUUID != nil && tx.TransactionHandlerUUID != *f.TransactionHandlerUUID { + return false + } + return true + } +} + +// ScheduledTransactionsBackend implements the extended API for querying scheduled transactions. 
+type ScheduledTransactionsBackend struct { + *backendBase + + log zerolog.Logger + store storage.ScheduledTransactionsIndexReader + scheduledTxLookup storage.ScheduledTransactionsReader + state protocol.State + scriptExecutor execution.ScriptExecutor +} + +// NewScheduledTransactionsBackend creates a new [ScheduledTransactionsBackend]. +func NewScheduledTransactionsBackend( + log zerolog.Logger, + base *backendBase, + store storage.ScheduledTransactionsIndexReader, + scheduledTxLookup storage.ScheduledTransactionsReader, + state protocol.State, + scriptExecutor execution.ScriptExecutor, +) *ScheduledTransactionsBackend { + return &ScheduledTransactionsBackend{ + backendBase: base, + log: log, + store: store, + scheduledTxLookup: scheduledTxLookup, + state: state, + scriptExecutor: scriptExecutor, + } +} + +// GetScheduledTransaction returns a single scheduled transaction by its scheduler-assigned ID. +// +// Expected error returns during normal operations: +// - [codes.NotFound]: if no transaction with the given ID exists +// - [codes.FailedPrecondition]: if the index has not been initialized +func (b *ScheduledTransactionsBackend) GetScheduledTransaction( + ctx context.Context, + id uint64, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.ScheduledTransaction, error) { + tx, err := b.store.ByID(id) + if err != nil { + return nil, b.mapReadError(ctx, "scheduled transaction", err) + } + + if !expandOptions.HasExpand() { + return &tx, nil + } + + if err := b.expand(ctx, &tx, expandOptions, encodingVersion); err != nil { + err = fmt.Errorf("failed to expand scheduled transaction %d: %w", tx.ID, err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + return &tx, nil +} + +// GetScheduledTransactions returns a paginated list of scheduled transactions. +// When filter.Address is set, results are scoped to that address; otherwise all are returned. 
+// +// Expected error returns during normal operations: +// - [codes.FailedPrecondition]: if the index has not been initialized +// - [codes.InvalidArgument]: if the query parameters are invalid +func (b *ScheduledTransactionsBackend) GetScheduledTransactions( + ctx context.Context, + limit uint32, + cursor *accessmodel.ScheduledTransactionCursor, + filter ScheduledTransactionFilter, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.ScheduledTransactionsPage, error) { + limit, err := b.normalizeLimit(limit) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid limit: %v", err) + } + + iter, err := b.store.All(cursor) + if err != nil { + return nil, b.mapReadError(ctx, "scheduled transactions", err) + } + + collected, nextCursor, err := iterator.CollectResults(iter, limit, filter.Filter()) + if err != nil { + err = fmt.Errorf("error collecting scheduled transactions: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: collected, + NextCursor: nextCursor, + } + + if !expandOptions.HasExpand() { + return page, nil + } + + for i := range page.Transactions { + if err := b.expand(ctx, &page.Transactions[i], expandOptions, encodingVersion); err != nil { + err = fmt.Errorf("failed to expand scheduled transaction %d: %w", page.Transactions[i].ID, err) + irrecoverable.Throw(ctx, err) + return nil, err + } + } + + return page, nil +} + +// GetScheduledTransactionsByAddress returns a paginated list of scheduled transactions for the given address. 
+// +// Expected error returns during normal operations: +// - [codes.FailedPrecondition]: if the index has not been initialized +// - [codes.InvalidArgument]: if the query parameters are invalid +func (b *ScheduledTransactionsBackend) GetScheduledTransactionsByAddress( + ctx context.Context, + address flow.Address, + limit uint32, + cursor *accessmodel.ScheduledTransactionCursor, + filter ScheduledTransactionFilter, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.ScheduledTransactionsPage, error) { + limit, err := b.normalizeLimit(limit) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid limit: %v", err) + } + + iter, err := b.store.ByAddress(address, cursor) + if err != nil { + return nil, b.mapReadError(ctx, "scheduled transactions", err) + } + + collected, nextCursor, err := iterator.CollectResults(iter, limit, filter.Filter()) + if err != nil { + err = fmt.Errorf("error collecting scheduled transactions: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: collected, + NextCursor: nextCursor, + } + + if !expandOptions.HasExpand() { + return page, nil + } + + for i := range page.Transactions { + if err := b.expand(ctx, &page.Transactions[i], expandOptions, encodingVersion); err != nil { + err = fmt.Errorf("failed to expand scheduled transaction %d: %w", page.Transactions[i].ID, err) + irrecoverable.Throw(ctx, err) + return nil, err + } + } + + return page, nil +} + +// expand enriches an executed scheduled transaction with its transaction result. +// For non-executed transactions, this is a no-op. +// +// No error returns are expected during normal operation. 
+func (b *ScheduledTransactionsBackend) expand( + ctx context.Context, + tx *accessmodel.ScheduledTransaction, + expandOptions ScheduledTransactionExpandOptions, + encodingVersion entities.EventEncodingVersion, +) error { + if expandOptions.HandlerContract { + err := b.expandHandlerContract(ctx, tx) + if err != nil { + return fmt.Errorf("failed to expand handler contract for scheduled tx %d: %w", tx.ID, err) + } + } + + if !expandOptions.Transaction && !expandOptions.Result { + return nil + } + + // if the transaction was not executed, there's nothing to expand. + if tx.Status != accessmodel.ScheduledTxStatusExecuted && tx.Status != accessmodel.ScheduledTxStatusFailed { + return nil + } + + txID, err := b.scheduledTxLookup.TransactionIDByID(tx.ID) + if err != nil { + // the transaction is marked as executed, so it must exist in storage. + return fmt.Errorf("failed to lookup transaction ID for scheduled tx %d: %w", tx.ID, err) + } + + blockID, err := b.scheduledTxLookup.BlockIDByTransactionID(txID) + if err != nil { + return fmt.Errorf("failed to lookup block ID for tx %s: %w", txID, err) + } + + header, err := b.headers.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("failed to get header for block %s: %w", blockID, err) + } + + if expandOptions.Transaction { + allScheduledTxs, err := b.transactionsProvider.ScheduledTransactionsByBlockID(ctx, header) + if err != nil { + return fmt.Errorf("could not retrieve all scheduled transactions: %w", err) + } + + for _, scheduledTx := range allScheduledTxs { + if scheduledTx.ID() == txID { + tx.Transaction = scheduledTx + break + } + } + if tx.Transaction == nil { + return fmt.Errorf("scheduled transaction %s not found in block %s", txID, blockID) + } + } + + if expandOptions.Result { + result, err := b.getTransactionResult(ctx, txID, header, true, expandOptions.Transaction, encodingVersion) + if err != nil { + return fmt.Errorf("failed to get transaction result for tx %s: %w", txID, err) + } + tx.Result = result + } 
+ + return nil +} + +func (b *ScheduledTransactionsBackend) expandHandlerContract( + ctx context.Context, + tx *accessmodel.ScheduledTransaction, +) error { + latest, err := b.state.Sealed().Head() + if err != nil { + return fmt.Errorf("failed to get latest sealed header: %w", err) + } + + // TODO: switch to the contracts index when it's implemented + account, err := b.scriptExecutor.GetAccountAtBlockHeight(ctx, tx.TransactionHandlerOwner, latest.Height) + if err != nil { + return fmt.Errorf("failed to get account for tx handler %s: %w", tx.TransactionHandlerOwner, err) + } + + contractID, err := transactionHandlerContract(tx.TransactionHandlerTypeIdentifier) + if err != nil { + return fmt.Errorf("failed to get contract ID for tx handler %s: %w", tx.TransactionHandlerTypeIdentifier, err) + } + contract, ok := account.Contracts[contractID] + if !ok { + return fmt.Errorf("contract %q not found in account %s", contractID, tx.TransactionHandlerOwner) + } + + tx.HandlerContract = &accessmodel.Contract{ + Identifier: contractID, + Body: string(contract), + } + + return nil +} + +// transactionHandlerContract extracts the contract ID from a transaction handler type identifier. +// The handler type identifier is a fully qualified Cadence type identifier of the transaction handler, +// e.g. 
+// +// A.1654653399040a61.MyScheduler.Handler -> A.1654653399040a61.MyScheduler +func transactionHandlerContract(handlerTypeIdentifier string) (string, error) { + parts := strings.Split(handlerTypeIdentifier, ".") + if len(parts) < 3 { + return "", fmt.Errorf("invalid handler type identifier: %s", handlerTypeIdentifier) + } + return strings.Join(parts[:3], "."), nil +} diff --git a/access/backends/extended/backend_scheduled_transactions_test.go b/access/backends/extended/backend_scheduled_transactions_test.go new file mode 100644 index 00000000000..040d4b6a419 --- /dev/null +++ b/access/backends/extended/backend_scheduled_transactions_test.go @@ -0,0 +1,1242 @@ +package extended + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + providermock "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider/mock" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + executionmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/irrecoverable" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// testSchedTxEntry is a simple storage.IteratorEntry implementation for tests. 
+type testSchedTxEntry struct { + tx accessmodel.ScheduledTransaction +} + +func (e testSchedTxEntry) Cursor() (accessmodel.ScheduledTransactionCursor, error) { + return accessmodel.ScheduledTransactionCursor{ID: e.tx.ID}, nil +} + +func (e testSchedTxEntry) Value() (accessmodel.ScheduledTransaction, error) { + return e.tx, nil +} + +// makeScheduledTxIter builds a storage.ScheduledTransactionIterator from a slice of transactions. +func makeScheduledTxIter(txs []accessmodel.ScheduledTransaction) storage.ScheduledTransactionIterator { + return func(yield func(storage.IteratorEntry[accessmodel.ScheduledTransaction, accessmodel.ScheduledTransactionCursor]) bool) { + for _, tx := range txs { + if !yield(testSchedTxEntry{tx: tx}) { + return + } + } + } +} + +// signalerCtxExpectingThrow creates a context that asserts irrecoverable.Throw is called +// with a non-nil error. Returns the context and a verification function that must be called +// after the operation under test to confirm Throw was invoked. +func signalerCtxExpectingThrow(t *testing.T) (context.Context, func()) { + t.Helper() + thrown := make(chan error, 1) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextWithCallback(t, context.Background(), func(err error) { + select { + case thrown <- err: + default: + } + })) + verify := func() { + t.Helper() + select { + case err := <-thrown: + require.Error(t, err, "irrecoverable.Throw must be called with a non-nil error") + default: + t.Fatal("expected irrecoverable.Throw to be called but it was not") + } + } + return signalerCtx, verify +} + +// TestTransactionHandlerContract tests the helper that extracts the contract ID from a +// transaction handler type identifier. 
+func TestTransactionHandlerContract(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + expected string + expectedErr bool + }{ + { + name: "standard type identifier", + input: "A.1654653399040a61.MyScheduler.Handler", + expected: "A.1654653399040a61.MyScheduler", + }, + { + name: "deeply nested type identifier returns A.address.Contract prefix only", + input: "A.1654653399040a61.MyScheduler.SubModule.Handler", + expected: "A.1654653399040a61.MyScheduler", + }, + { + name: "exactly three parts is valid", + input: "A.1654653399040a61.MyScheduler", + expected: "A.1654653399040a61.MyScheduler", + }, + { + name: "fewer than three parts returns error", + input: "SomeContract.Handler", + expectedErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + contractID, err := transactionHandlerContract(tt.input) + if tt.expectedErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.expected, contractID) + }) + } +} + +// TestScheduledTransactionFilter tests that ScheduledTransactionFilter.Filter produces a +// predicate that correctly matches or rejects scheduled transactions for each filter field, +// and for combined multi-field filters. 
+func TestScheduledTransactionFilter(t *testing.T) { + t.Parallel() + + ownerAddr := unittest.RandomAddressFixture() + otherAddr := unittest.RandomAddressFixture() + handlerTypeID := "A.1654653399040a61.MyScheduler.Handler" + otherTypeID := "A.0000000000000001.OtherScheduler.Handler" + + tx := &accessmodel.ScheduledTransaction{ + ID: 42, + Status: accessmodel.ScheduledTxStatusScheduled, + Priority: 5, + Timestamp: 1000, + TransactionHandlerOwner: ownerAddr, + TransactionHandlerTypeIdentifier: handlerTypeID, + TransactionHandlerUUID: 99, + } + + t.Run("empty filter returns nil", func(t *testing.T) { + filter := ScheduledTransactionFilter{} + assert.Nil(t, filter.Filter()) + }) + + t.Run("status filter matches", func(t *testing.T) { + filter := ScheduledTransactionFilter{ + Statuses: []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusScheduled}, + } + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("status filter rejects mismatch", func(t *testing.T) { + filter := ScheduledTransactionFilter{ + Statuses: []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusExecuted}, + } + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("status filter matches when one of multiple statuses matches", func(t *testing.T) { + filter := ScheduledTransactionFilter{ + Statuses: []accessmodel.ScheduledTransactionStatus{ + accessmodel.ScheduledTxStatusExecuted, + accessmodel.ScheduledTxStatusScheduled, + }, + } + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("priority filter matches", func(t *testing.T) { + p := accessmodel.ScheduledTransactionPriority(5) + filter := ScheduledTransactionFilter{Priority: &p} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("priority filter rejects mismatch", func(t *testing.T) { + p := accessmodel.ScheduledTransactionPriority(10) + filter := ScheduledTransactionFilter{Priority: &p} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("start time inclusive lower bound matches equal timestamp", func(t 
*testing.T) { + start := uint64(1000) + filter := ScheduledTransactionFilter{StartTime: &start} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("start time rejects timestamp below bound", func(t *testing.T) { + start := uint64(1001) + filter := ScheduledTransactionFilter{StartTime: &start} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("end time inclusive upper bound matches equal timestamp", func(t *testing.T) { + end := uint64(1000) + filter := ScheduledTransactionFilter{EndTime: &end} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("end time rejects timestamp above bound", func(t *testing.T) { + end := uint64(999) + filter := ScheduledTransactionFilter{EndTime: &end} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("start and end time window matches timestamp within range", func(t *testing.T) { + start := uint64(900) + end := uint64(1100) + filter := ScheduledTransactionFilter{StartTime: &start, EndTime: &end} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("handler owner filter matches", func(t *testing.T) { + filter := ScheduledTransactionFilter{TransactionHandlerOwner: &ownerAddr} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("handler owner filter rejects mismatch", func(t *testing.T) { + filter := ScheduledTransactionFilter{TransactionHandlerOwner: &otherAddr} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("handler type ID filter matches", func(t *testing.T) { + filter := ScheduledTransactionFilter{TransactionHandlerTypeID: &handlerTypeID} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("handler type ID filter rejects mismatch", func(t *testing.T) { + filter := ScheduledTransactionFilter{TransactionHandlerTypeID: &otherTypeID} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("handler UUID filter matches", func(t *testing.T) { + uuid := uint64(99) + filter := ScheduledTransactionFilter{TransactionHandlerUUID: &uuid} + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("handler UUID filter rejects mismatch", 
func(t *testing.T) { + uuid := uint64(100) + filter := ScheduledTransactionFilter{TransactionHandlerUUID: &uuid} + assert.False(t, filter.Filter()(tx)) + }) + + t.Run("combined filters all match", func(t *testing.T) { + p := accessmodel.ScheduledTransactionPriority(5) + start := uint64(1000) + end := uint64(1000) + uuid := uint64(99) + filter := ScheduledTransactionFilter{ + Statuses: []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusScheduled}, + Priority: &p, + StartTime: &start, + EndTime: &end, + TransactionHandlerOwner: &ownerAddr, + TransactionHandlerTypeID: &handlerTypeID, + TransactionHandlerUUID: &uuid, + } + assert.True(t, filter.Filter()(tx)) + }) + + t.Run("combined filters reject on single mismatch", func(t *testing.T) { + p := accessmodel.ScheduledTransactionPriority(5) + wrongUUID := uint64(100) // mismatch + filter := ScheduledTransactionFilter{ + Priority: &p, + TransactionHandlerUUID: &wrongUUID, + } + assert.False(t, filter.Filter()(tx)) + }) +} + +// TestScheduledTransactionsBackend_GetScheduledTransaction tests all code paths for the +// GetScheduledTransaction method, including storage error mappings and all expand combinations. 
+func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { + t.Parallel() + + defaultEncoding := entities.EventEncodingVersion_JSON_CDC_V0 + defaultConfig := DefaultConfig() + + t.Run("happy path: returns transaction without expand", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + expectedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusScheduled} + store.On("ByID", uint64(1)).Return(expectedTx, nil).Once() + + result, err := backend.GetScheduledTransaction( + context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding, + ) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, expectedTx, *result) + assert.Nil(t, result.Transaction) + assert.Nil(t, result.Result) + assert.Nil(t, result.HandlerContract) + }) + + t.Run("ErrNotFound maps to codes.NotFound", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("ByID", uint64(99)).Return(accessmodel.ScheduledTransaction{}, storage.ErrNotFound).Once() + + _, err := backend.GetScheduledTransaction( + context.Background(), 99, ScheduledTransactionExpandOptions{}, defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("ErrNotBootstrapped maps to codes.FailedPrecondition", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("ByID", uint64(1)).Return(accessmodel.ScheduledTransaction{}, storage.ErrNotBootstrapped).Once() + 
+ _, err := backend.GetScheduledTransaction( + context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.FailedPrecondition, st.Code()) + }) + + t.Run("unexpected storage error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + storageErr := fmt.Errorf("unexpected disk failure") + store.On("ByID", uint64(1)).Return(accessmodel.ScheduledTransaction{}, storageErr).Once() + + expectedErr := fmt.Errorf("failed to get scheduled transaction: %w", storageErr) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(t, context.Background(), expectedErr)) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{}, defaultEncoding, + ) + require.Error(t, err) + }) + + // expand is no-op for scheduled and cancelled statuses + for _, status := range []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusScheduled, accessmodel.ScheduledTxStatusCancelled} { + t.Run(fmt.Sprintf("expand is no-op for %s status", status), func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + tx := accessmodel.ScheduledTransaction{ID: 1, Status: status} + store.On("ByID", uint64(1)).Return(tx, nil).Once() + + // expand options set but status is Scheduled: no storage lookups expected + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{Result: true, Transaction: true}, + defaultEncoding, + ) + require.NoError(t, err) + assert.Nil(t, 
result.Transaction) + assert.Nil(t, result.Result) + }) + } + + // expand result works for executed and failed transactions + for _, status := range []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusExecuted, accessmodel.ScheduledTxStatusFailed} { + t.Run(fmt.Sprintf("expand result on %s transaction", status), func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockProvider := providermock.NewTransactionProvider(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + transactionsProvider: mockProvider, + }, + store, scheduledTxLookup, nil, nil, + ) + + txID := unittest.IdentifierFixture() + blockHeader := unittest.BlockHeaderFixture() + blockID := blockHeader.ID() + + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status} + expectedResult := &accessmodel.TransactionResult{ + TransactionID: txID, + BlockID: blockID, + Status: flow.TransactionStatusSealed, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() + mockProvider.On("TransactionResult", mocktestify.Anything, blockHeader, txID, mocktestify.Anything, defaultEncoding). 
+ Return(expectedResult, nil).Once() + + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{Result: true}, + defaultEncoding, + ) + require.NoError(t, err) + require.NotNil(t, result.Result) + assert.Equal(t, expectedResult, result.Result) + assert.Nil(t, result.Transaction) + }) + } + + // expand tx body works for executed and failed transactions + for _, status := range []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusExecuted, accessmodel.ScheduledTxStatusFailed} { + t.Run(fmt.Sprintf("expand transaction body on %s transaction", status), func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockProvider := providermock.NewTransactionProvider(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + transactionsProvider: mockProvider, + }, + store, scheduledTxLookup, nil, nil, + ) + + txBody := unittest.TransactionBodyFixture() + txID := txBody.ID() + blockHeader := unittest.BlockHeaderFixture() + blockID := blockHeader.ID() + + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status} + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() + mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
+ Return([]*flow.TransactionBody{&txBody}, nil).Once() + + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{Transaction: true}, + defaultEncoding, + ) + require.NoError(t, err) + require.NotNil(t, result.Transaction) + assert.Equal(t, &txBody, result.Transaction) + assert.Nil(t, result.Result) + }) + } + + t.Run("expand handler contract", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + mockState := protocolmock.NewState(t) + mockSnapshot := protocolmock.NewSnapshot(t) + mockScriptExecutor := executionmock.NewScriptExecutor(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, mockState, mockScriptExecutor, + ) + + handlerOwner := unittest.RandomAddressFixture() + handlerTypeID := "A.1654653399040a61.MyScheduler.Handler" + contractID := "A.1654653399040a61.MyScheduler" + contractBody := []byte("pub contract MyScheduler {}") + sealedHeader := unittest.BlockHeaderFixture() + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: handlerTypeID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + mockState.On("Sealed").Return(mockSnapshot).Once() + mockSnapshot.On("Head").Return(sealedHeader, nil).Once() + mockScriptExecutor.On("GetAccountAtBlockHeight", mocktestify.Anything, handlerOwner, sealedHeader.Height). 
+ Return(&flow.Account{ + Contracts: map[string][]byte{contractID: contractBody}, + }, nil).Once() + + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{HandlerContract: true}, + defaultEncoding, + ) + require.NoError(t, err) + require.NotNil(t, result.HandlerContract) + assert.Equal(t, contractID, result.HandlerContract.Identifier) + assert.Equal(t, string(contractBody), result.HandlerContract.Body) + }) + + t.Run("TransactionIDByID error during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, scheduledTxLookup, nil, nil, + ) + + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + lookupErr := fmt.Errorf("lookup error") + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Result: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("BlockIDByTransactionID error during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, scheduledTxLookup, nil, nil, + ) + + txID := unittest.IdentifierFixture() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + blockLookupErr := fmt.Errorf("block lookup error") + + store.On("ByID", 
uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(flow.Identifier{}, blockLookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Result: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("ByBlockID error during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig, headers: mockHeaders}, + store, scheduledTxLookup, nil, nil, + ) + + txID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + headerErr := fmt.Errorf("header lookup error") + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return((*flow.Header)(nil), headerErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Result: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("ScheduledTransactionsByBlockID error during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockProvider := 
providermock.NewTransactionProvider(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + transactionsProvider: mockProvider, + }, + store, scheduledTxLookup, nil, nil, + ) + + txID := unittest.IdentifierFixture() + blockHeader := unittest.BlockHeaderFixture() + blockID := blockHeader.ID() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + providerErr := fmt.Errorf("provider error") + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() + mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). + Return(nil, providerErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("transaction not found in block during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockProvider := providermock.NewTransactionProvider(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + transactionsProvider: mockProvider, + }, + store, scheduledTxLookup, nil, nil, + ) + + // txID that does NOT match the tx body returned by the provider. 
+ txID := unittest.IdentifierFixture() + blockHeader := unittest.BlockHeaderFixture() + blockID := blockHeader.ID() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + // otherTxBody.ID() != txID + otherTxBody := unittest.TransactionBodyFixture() + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() + mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). + Return([]*flow.TransactionBody{&otherTxBody}, nil).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("TransactionResult error during expand triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockProvider := providermock.NewTransactionProvider(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + transactionsProvider: mockProvider, + }, + store, scheduledTxLookup, nil, nil, + ) + + txID := unittest.IdentifierFixture() + blockHeader := unittest.BlockHeaderFixture() + blockID := blockHeader.ID() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + resultErr := fmt.Errorf("result lookup error") + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", 
txID).Return(blockID, nil).Once() + mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() + mockProvider.On("TransactionResult", mocktestify.Anything, blockHeader, txID, mocktestify.Anything, defaultEncoding). + Return(nil, resultErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, ScheduledTransactionExpandOptions{Result: true}, defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("expandHandlerContract: state.Sealed().Head() error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + mockState := protocolmock.NewState(t) + mockSnapshot := protocolmock.NewSnapshot(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, mockState, nil, + ) + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + TransactionHandlerOwner: unittest.RandomAddressFixture(), + TransactionHandlerTypeIdentifier: "A.1654653399040a61.MyScheduler.Handler", + } + headErr := fmt.Errorf("sealed head error") + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + mockState.On("Sealed").Return(mockSnapshot).Once() + mockSnapshot.On("Head").Return((*flow.Header)(nil), headErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, + ScheduledTransactionExpandOptions{HandlerContract: true}, + defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("expandHandlerContract: scriptExecutor error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + mockState := protocolmock.NewState(t) + mockSnapshot := protocolmock.NewSnapshot(t) + mockScriptExecutor := executionmock.NewScriptExecutor(t) + + backend := NewScheduledTransactionsBackend( + 
unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, mockState, mockScriptExecutor, + ) + + handlerOwner := unittest.RandomAddressFixture() + sealedHeader := unittest.BlockHeaderFixture() + execErr := fmt.Errorf("script executor error") + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.1654653399040a61.MyScheduler.Handler", + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + mockState.On("Sealed").Return(mockSnapshot).Once() + mockSnapshot.On("Head").Return(sealedHeader, nil).Once() + mockScriptExecutor.On("GetAccountAtBlockHeight", mocktestify.Anything, handlerOwner, sealedHeader.Height). + Return(nil, execErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, + ScheduledTransactionExpandOptions{HandlerContract: true}, + defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) + + t.Run("expandHandlerContract: contract not found in account triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + mockState := protocolmock.NewState(t) + mockSnapshot := protocolmock.NewSnapshot(t) + mockScriptExecutor := executionmock.NewScriptExecutor(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, mockState, mockScriptExecutor, + ) + + handlerOwner := unittest.RandomAddressFixture() + sealedHeader := unittest.BlockHeaderFixture() + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.1654653399040a61.MyScheduler.Handler", + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + mockState.On("Sealed").Return(mockSnapshot).Once() + 
mockSnapshot.On("Head").Return(sealedHeader, nil).Once() + // Account exists but does not have the expected contract. + mockScriptExecutor.On("GetAccountAtBlockHeight", mocktestify.Anything, handlerOwner, sealedHeader.Height). + Return(&flow.Account{Contracts: map[string][]byte{}}, nil).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction( + signalerCtx, 1, + ScheduledTransactionExpandOptions{HandlerContract: true}, + defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) +} + +// TestScheduledTransactionsBackend_GetScheduledTransactions tests all code paths for the +// GetScheduledTransactions method, including pagination, filtering, and error handling. +func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { + t.Parallel() + + defaultEncoding := entities.EventEncodingVersion_JSON_CDC_V0 + defaultConfig := DefaultConfig() + + t.Run("happy path: returns transactions without next cursor when fewer than limit", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + txs := []accessmodel.ScheduledTransaction{ + {ID: 5, Status: accessmodel.ScheduledTxStatusScheduled}, + {ID: 3, Status: accessmodel.ScheduledTxStatusExecuted}, + } + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(txs), nil).Once() + + page, err := backend.GetScheduledTransactions( + context.Background(), 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + require.Len(t, page.Transactions, 2) + assert.Nil(t, page.NextCursor) + }) + + t.Run("next cursor set when iterator yields more than limit items", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + // limit=2, provide 3 items: CollectResults collects 2, then peeks at item 3 to build cursor + txs := []accessmodel.ScheduledTransaction{ + {ID: 5, Status: accessmodel.ScheduledTxStatusScheduled}, + {ID: 3, Status: accessmodel.ScheduledTxStatusExecuted}, + {ID: 1, Status: accessmodel.ScheduledTxStatusScheduled}, + } + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(txs), nil).Once() + + page, err := backend.GetScheduledTransactions( + context.Background(), 2, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + require.Len(t, page.Transactions, 2) + require.NotNil(t, page.NextCursor) + assert.Equal(t, uint64(1), page.NextCursor.ID) + }) + + t.Run("default limit applied when limit is 0", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(nil), nil).Once() + + _, err := backend.GetScheduledTransactions( + context.Background(), 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + }) + + t.Run("explicit limit is respected", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(nil), nil).Once() + + _, err := backend.GetScheduledTransactions( + context.Background(), 10, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + }) + + t.Run("limit exceeding max returns InvalidArgument", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + _, err := backend.GetScheduledTransactions( + context.Background(), 500, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("cursor is forwarded to storage", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + cursor := &accessmodel.ScheduledTransactionCursor{ID: 100} + store.On("All", cursor). 
+ Return(makeScheduledTxIter(nil), nil).Once() + + _, err := backend.GetScheduledTransactions( + context.Background(), 20, cursor, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + }) + + t.Run("empty result set returns empty page", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(nil), nil).Once() + + page, err := backend.GetScheduledTransactions( + context.Background(), 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + assert.Empty(t, page.Transactions) + assert.Nil(t, page.NextCursor) + }) + + t.Run("ErrNotBootstrapped maps to FailedPrecondition", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(nil, storage.ErrNotBootstrapped).Once() + + _, err := backend.GetScheduledTransactions( + context.Background(), 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.FailedPrecondition, st.Code()) + }) + + t.Run("unexpected storage error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + storageErr := fmt.Errorf("unexpected disk failure") + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(nil, storageErr).Once() + + expectedErr := fmt.Errorf("failed to get scheduled transactions: %w", storageErr) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(t, context.Background(), expectedErr)) + + _, err := backend.GetScheduledTransactions( + signalerCtx, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + }) + + t.Run("expand error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, scheduledTxLookup, nil, nil, + ) + + txs := []accessmodel.ScheduledTransaction{ + {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted}, + } + lookupErr := fmt.Errorf("lookup failed") + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(txs), nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransactions( + signalerCtx, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{Result: true}, + defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) +} + +// TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress tests all code paths for the +// GetScheduledTransactionsByAddress method, including pagination, address scoping, and error handling. +func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testing.T) { + t.Parallel() + + defaultEncoding := entities.EventEncodingVersion_JSON_CDC_V0 + defaultConfig := DefaultConfig() + + t.Run("happy path: returns transactions for address without next cursor when fewer than limit", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + txs := []accessmodel.ScheduledTransaction{ + {ID: 7, Status: accessmodel.ScheduledTxStatusScheduled}, + } + + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(txs), nil).Once() + + page, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + require.Len(t, page.Transactions, 1) + assert.Equal(t, uint64(7), page.Transactions[0].ID) + assert.Nil(t, page.NextCursor) + }) + + t.Run("default limit applied when limit is 0", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(nil), nil).Once() + + _, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + }) + + t.Run("limit exceeding max returns InvalidArgument", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + + _, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 500, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("cursor is forwarded to storage", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + 
cursor := &accessmodel.ScheduledTransactionCursor{ID: 50} + store.On("ByAddress", addr, cursor). + Return(makeScheduledTxIter(nil), nil).Once() + + _, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 15, cursor, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + }) + + t.Run("empty result set returns empty page", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(nil), nil).Once() + + page, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + assert.Empty(t, page.Transactions) + assert.Nil(t, page.NextCursor) + }) + + t.Run("ErrNotBootstrapped maps to FailedPrecondition", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(nil, storage.ErrNotBootstrapped).Once() + + _, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + st, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, codes.FailedPrecondition, st.Code()) + }) + + t.Run("unexpected storage error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + storageErr := fmt.Errorf("unexpected disk failure") + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(nil, storageErr).Once() + + expectedErr := fmt.Errorf("failed to get scheduled transactions: %w", storageErr) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(t, context.Background(), expectedErr)) + + _, err := backend.GetScheduledTransactionsByAddress( + signalerCtx, addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.Error(t, err) + }) + + t.Run("expand error triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, scheduledTxLookup, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + txs := []accessmodel.ScheduledTransaction{ + {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted}, + } + lookupErr := fmt.Errorf("lookup failed") + + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(txs), nil).Once() + scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransactionsByAddress( + signalerCtx, addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{Result: true}, + defaultEncoding, + ) + require.Error(t, err) + verifyThrown() + }) +} diff --git a/access/backends/extended/mock/api.go b/access/backends/extended/mock/api.go index 29d8b2648a9..c83fbad7928 100644 --- a/access/backends/extended/mock/api.go +++ b/access/backends/extended/mock/api.go @@ -334,3 +334,273 @@ func (_c *API_GetAccountTransactions_Call) RunAndReturn(run func(ctx context.Con _c.Call.Return(run) return _c } + +// GetScheduledTransaction provides a mock function for the type API +func (_mock *API) GetScheduledTransaction(ctx context.Context, id uint64, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransaction, error) { + ret := _mock.Called(ctx, id, expandOptions, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetScheduledTransaction") + } + + var r0 *access.ScheduledTransaction + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) (*access.ScheduledTransaction, error)); ok { + return returnFunc(ctx, id, expandOptions, encodingVersion) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) *access.ScheduledTransaction); ok { + r0 = returnFunc(ctx, id, expandOptions, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ScheduledTransaction) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint64, extended.ScheduledTransactionExpandOptions, 
entities.EventEncodingVersion) error); ok { + r1 = returnFunc(ctx, id, expandOptions, encodingVersion) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// API_GetScheduledTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetScheduledTransaction' +type API_GetScheduledTransaction_Call struct { + *mock.Call +} + +// GetScheduledTransaction is a helper method to define mock.On call +// - ctx context.Context +// - id uint64 +// - expandOptions extended.ScheduledTransactionExpandOptions +// - encodingVersion entities.EventEncodingVersion +func (_e *API_Expecter) GetScheduledTransaction(ctx interface{}, id interface{}, expandOptions interface{}, encodingVersion interface{}) *API_GetScheduledTransaction_Call { + return &API_GetScheduledTransaction_Call{Call: _e.mock.On("GetScheduledTransaction", ctx, id, expandOptions, encodingVersion)} +} + +func (_c *API_GetScheduledTransaction_Call) Run(run func(ctx context.Context, id uint64, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion)) *API_GetScheduledTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint64 + if args[1] != nil { + arg1 = args[1].(uint64) + } + var arg2 extended.ScheduledTransactionExpandOptions + if args[2] != nil { + arg2 = args[2].(extended.ScheduledTransactionExpandOptions) + } + var arg3 entities.EventEncodingVersion + if args[3] != nil { + arg3 = args[3].(entities.EventEncodingVersion) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *API_GetScheduledTransaction_Call) Return(scheduledTransaction *access.ScheduledTransaction, err error) *API_GetScheduledTransaction_Call { + _c.Call.Return(scheduledTransaction, err) + return _c +} + +func (_c *API_GetScheduledTransaction_Call) RunAndReturn(run func(ctx context.Context, id uint64, expandOptions 
extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransaction, error)) *API_GetScheduledTransaction_Call { + _c.Call.Return(run) + return _c +} + +// GetScheduledTransactions provides a mock function for the type API +func (_mock *API) GetScheduledTransactions(ctx context.Context, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error) { + ret := _mock.Called(ctx, limit, cursor, filter, expandOptions, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetScheduledTransactions") + } + + var r0 *access.ScheduledTransactionsPage + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error)); ok { + return returnFunc(ctx, limit, cursor, filter, expandOptions, encodingVersion) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) *access.ScheduledTransactionsPage); ok { + r0 = returnFunc(ctx, limit, cursor, filter, expandOptions, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ScheduledTransactionsPage) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) error); ok { + r1 = returnFunc(ctx, limit, cursor, filter, expandOptions, encodingVersion) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// 
API_GetScheduledTransactions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetScheduledTransactions' +type API_GetScheduledTransactions_Call struct { + *mock.Call +} + +// GetScheduledTransactions is a helper method to define mock.On call +// - ctx context.Context +// - limit uint32 +// - cursor *access.ScheduledTransactionCursor +// - filter extended.ScheduledTransactionFilter +// - expandOptions extended.ScheduledTransactionExpandOptions +// - encodingVersion entities.EventEncodingVersion +func (_e *API_Expecter) GetScheduledTransactions(ctx interface{}, limit interface{}, cursor interface{}, filter interface{}, expandOptions interface{}, encodingVersion interface{}) *API_GetScheduledTransactions_Call { + return &API_GetScheduledTransactions_Call{Call: _e.mock.On("GetScheduledTransactions", ctx, limit, cursor, filter, expandOptions, encodingVersion)} +} + +func (_c *API_GetScheduledTransactions_Call) Run(run func(ctx context.Context, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion)) *API_GetScheduledTransactions_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 uint32 + if args[1] != nil { + arg1 = args[1].(uint32) + } + var arg2 *access.ScheduledTransactionCursor + if args[2] != nil { + arg2 = args[2].(*access.ScheduledTransactionCursor) + } + var arg3 extended.ScheduledTransactionFilter + if args[3] != nil { + arg3 = args[3].(extended.ScheduledTransactionFilter) + } + var arg4 extended.ScheduledTransactionExpandOptions + if args[4] != nil { + arg4 = args[4].(extended.ScheduledTransactionExpandOptions) + } + var arg5 entities.EventEncodingVersion + if args[5] != nil { + arg5 = args[5].(entities.EventEncodingVersion) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, 
+ arg5, + ) + }) + return _c +} + +func (_c *API_GetScheduledTransactions_Call) Return(scheduledTransactionsPage *access.ScheduledTransactionsPage, err error) *API_GetScheduledTransactions_Call { + _c.Call.Return(scheduledTransactionsPage, err) + return _c +} + +func (_c *API_GetScheduledTransactions_Call) RunAndReturn(run func(ctx context.Context, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error)) *API_GetScheduledTransactions_Call { + _c.Call.Return(run) + return _c +} + +// GetScheduledTransactionsByAddress provides a mock function for the type API +func (_mock *API) GetScheduledTransactionsByAddress(ctx context.Context, address flow.Address, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error) { + ret := _mock.Called(ctx, address, limit, cursor, filter, expandOptions, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetScheduledTransactionsByAddress") + } + + var r0 *access.ScheduledTransactionsPage + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error)); ok { + return returnFunc(ctx, address, limit, cursor, filter, expandOptions, encodingVersion) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) *access.ScheduledTransactionsPage); ok { + r0 
= returnFunc(ctx, address, limit, cursor, filter, expandOptions, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ScheduledTransactionsPage) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, *access.ScheduledTransactionCursor, extended.ScheduledTransactionFilter, extended.ScheduledTransactionExpandOptions, entities.EventEncodingVersion) error); ok { + r1 = returnFunc(ctx, address, limit, cursor, filter, expandOptions, encodingVersion) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// API_GetScheduledTransactionsByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetScheduledTransactionsByAddress' +type API_GetScheduledTransactionsByAddress_Call struct { + *mock.Call +} + +// GetScheduledTransactionsByAddress is a helper method to define mock.On call +// - ctx context.Context +// - address flow.Address +// - limit uint32 +// - cursor *access.ScheduledTransactionCursor +// - filter extended.ScheduledTransactionFilter +// - expandOptions extended.ScheduledTransactionExpandOptions +// - encodingVersion entities.EventEncodingVersion +func (_e *API_Expecter) GetScheduledTransactionsByAddress(ctx interface{}, address interface{}, limit interface{}, cursor interface{}, filter interface{}, expandOptions interface{}, encodingVersion interface{}) *API_GetScheduledTransactionsByAddress_Call { + return &API_GetScheduledTransactionsByAddress_Call{Call: _e.mock.On("GetScheduledTransactionsByAddress", ctx, address, limit, cursor, filter, expandOptions, encodingVersion)} +} + +func (_c *API_GetScheduledTransactionsByAddress_Call) Run(run func(ctx context.Context, address flow.Address, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion)) *API_GetScheduledTransactionsByAddress_Call { + _c.Call.Run(func(args 
mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 flow.Address + if args[1] != nil { + arg1 = args[1].(flow.Address) + } + var arg2 uint32 + if args[2] != nil { + arg2 = args[2].(uint32) + } + var arg3 *access.ScheduledTransactionCursor + if args[3] != nil { + arg3 = args[3].(*access.ScheduledTransactionCursor) + } + var arg4 extended.ScheduledTransactionFilter + if args[4] != nil { + arg4 = args[4].(extended.ScheduledTransactionFilter) + } + var arg5 extended.ScheduledTransactionExpandOptions + if args[5] != nil { + arg5 = args[5].(extended.ScheduledTransactionExpandOptions) + } + var arg6 entities.EventEncodingVersion + if args[6] != nil { + arg6 = args[6].(entities.EventEncodingVersion) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, + arg5, + arg6, + ) + }) + return _c +} + +func (_c *API_GetScheduledTransactionsByAddress_Call) Return(scheduledTransactionsPage *access.ScheduledTransactionsPage, err error) *API_GetScheduledTransactionsByAddress_Call { + _c.Call.Return(scheduledTransactionsPage, err) + return _c +} + +func (_c *API_GetScheduledTransactionsByAddress_Call) RunAndReturn(run func(ctx context.Context, address flow.Address, limit uint32, cursor *access.ScheduledTransactionCursor, filter extended.ScheduledTransactionFilter, expandOptions extended.ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion) (*access.ScheduledTransactionsPage, error)) *API_GetScheduledTransactionsByAddress_Call { + _c.Call.Return(run) + return _c +} diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 60a55168815..f810044932c 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -1113,10 +1113,18 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess builder.ExtendedStorage.NonFungibleTokenTransfersBootstrapper, ) + 
scheduledTransactions := extended.NewScheduledTransactions( + node.Logger, + builder.ExtendedStorage.ScheduledTransactionsBootstrapper, + builder.ScriptExecutor, + node.RootChainID, + ) + extendedIndexers := []extended.Indexer{ accountTransactions, ftTransfers, nftTransfers, + scheduledTransactions, } extendedIndexer, err := extended.NewExtendedIndexer( @@ -2327,7 +2335,9 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { utils.NotNil(node.Storage.Collections), utils.NotNil(node.Storage.Transactions), builder.scheduledTransactions, + builder.ExtendedStorage.ScheduledTransactionsBootstrapper, txstatus.NewTxStatusDeriver(node.State, lastFullBlockHeight), + utils.NotNil(builder.ScriptExecutor), ) if err != nil { return nil, fmt.Errorf("could not initialize extended backend: %w", err) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index b36c8c3aaa0..2dee6152f53 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -1641,8 +1641,30 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS return nil, fmt.Errorf("could not create account transactions indexer: %w", err) } + ftTransfers := extended.NewFungibleTokenTransfers( + node.Logger, + node.RootChainID, + builder.ExtendedStorage.FungibleTokenTransfersBootstrapper, + ) + + nftTransfers := extended.NewNonFungibleTokenTransfers( + node.Logger, + node.RootChainID, + builder.ExtendedStorage.NonFungibleTokenTransfersBootstrapper, + ) + + scheduledTransactions := extended.NewScheduledTransactions( + node.Logger, + builder.ExtendedStorage.ScheduledTransactionsBootstrapper, + builder.ScriptExecutor, + node.RootChainID, + ) + extendedIndexers := []extended.Indexer{ accountTransactions, + ftTransfers, + nftTransfers, + scheduledTransactions, } extendedIndexer, err := extended.NewExtendedIndexer( @@ -2197,7 +2219,9 @@ func (builder *ObserverServiceBuilder) 
enqueueRPCServer() { utils.NotNil(node.Storage.Collections), utils.NotNil(node.Storage.Transactions), builder.scheduledTransactions, + builder.ExtendedStorage.ScheduledTransactionsBootstrapper, txstatus.NewTxStatusDeriver(node.State, builder.lastFullBlockHeight), + builder.ScriptExecutor, ) if err != nil { return nil, fmt.Errorf("could not initialize extended backend: %w", err) diff --git a/engine/access/rest/experimental/models/contract.go b/engine/access/rest/experimental/models/contract.go new file mode 100644 index 00000000000..b121d11f12d --- /dev/null +++ b/engine/access/rest/experimental/models/contract.go @@ -0,0 +1,9 @@ +package models + +import accessmodel "github.com/onflow/flow-go/model/access" + +// Build populates a [Contract] from a domain model. +func (c *Contract) Build(contract *accessmodel.Contract) { + c.Identifier = contract.Identifier + c.Body = contract.Body +} diff --git a/engine/access/rest/experimental/models/model_contract.go b/engine/access/rest/experimental/models/model_contract.go new file mode 100644 index 00000000000..f505b4dcc53 --- /dev/null +++ b/engine/access/rest/experimental/models/model_contract.go @@ -0,0 +1,16 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type Contract struct { + // Unique identifier for the contract (e.g. `A.1654653399040a61.MyContract`). + Identifier string `json:"identifier"` + // Full source code of the contract. 
+ Body string `json:"body"` +} diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction.go b/engine/access/rest/experimental/models/model_scheduled_transaction.go new file mode 100644 index 00000000000..6f7d982c8ee --- /dev/null +++ b/engine/access/rest/experimental/models/model_scheduled_transaction.go @@ -0,0 +1,43 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +import commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + +type ScheduledTransaction struct { + // Scheduler-assigned uint64 identifier. + Id string `json:"id"` + Status *ScheduledTransactionStatus `json:"status"` + Priority *ScheduledTransactionPriority `json:"priority"` + // Scheduled execution timestamp as a UFix64 decimal string. + Timestamp string `json:"timestamp"` + // Execution effort estimate as a UFix64 decimal string. + ExecutionEffort string `json:"execution_effort"` + // Scheduled fee as a UFix64 decimal string. + Fees string `json:"fees"` + TransactionHandlerOwner string `json:"transaction_handler_owner"` + // Fully qualified Cadence type identifier of the transaction handler (e.g. `A.1654653399040a61.MyScheduler.Handler`). + TransactionHandlerTypeIdentifier string `json:"transaction_handler_type_identifier"` + // Resource UUID of the transaction handler. + TransactionHandlerUuid string `json:"transaction_handler_uuid"` + // Public path of the transaction handler, if set. + TransactionHandlerPublicPath string `json:"transaction_handler_public_path,omitempty"` + // Fees returned on cancellation, as a UFix64 decimal string. + FeesReturned string `json:"fees_returned,omitempty"` + // Fees deducted on cancellation, as a UFix64 decimal string. 
+ FeesDeducted string `json:"fees_deducted,omitempty"` + CreatedTransactionId string `json:"created_transaction_id"` + ExecutedTransactionId string `json:"executed_transaction_id,omitempty"` + CancelledTransactionId string `json:"cancelled_transaction_id,omitempty"` + Transaction *commonmodels.Transaction `json:"transaction,omitempty"` + Result *commonmodels.TransactionResult `json:"result,omitempty"` + HandlerContract *Contract `json:"handler_contract,omitempty"` + Expandable *ScheduledTransactionExpandable `json:"_expandable"` + Links *commonmodels.Links `json:"_links,omitempty"` +} diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction__expandable.go b/engine/access/rest/experimental/models/model_scheduled_transaction__expandable.go new file mode 100644 index 00000000000..d1a34be9e8a --- /dev/null +++ b/engine/access/rest/experimental/models/model_scheduled_transaction__expandable.go @@ -0,0 +1,19 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +// Contains URI links for fields not included in the response. When a field is expanded via the `expand` query parameter, it appears inline and is removed from `_expandable`. +type ScheduledTransactionExpandable struct { + // Link to fetch the full transaction body. + Transaction string `json:"transaction,omitempty"` + // Link to fetch the transaction result. + Result string `json:"result,omitempty"` + // Link to fetch the Cadence contract that implements the transaction handler. 
+ HandlerContract string `json:"handler_contract,omitempty"` +} diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction_priority.go b/engine/access/rest/experimental/models/model_scheduled_transaction_priority.go new file mode 100644 index 00000000000..59f083070a3 --- /dev/null +++ b/engine/access/rest/experimental/models/model_scheduled_transaction_priority.go @@ -0,0 +1,19 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +// ScheduledTransactionPriority : The execution priority of a scheduled transaction. +type ScheduledTransactionPriority string + +// List of ScheduledTransactionPriority +const ( + LOW_ScheduledTransactionPriority ScheduledTransactionPriority = "low" + MEDIUM_ScheduledTransactionPriority ScheduledTransactionPriority = "medium" + HIGH_ScheduledTransactionPriority ScheduledTransactionPriority = "high" +) diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction_status.go b/engine/access/rest/experimental/models/model_scheduled_transaction_status.go new file mode 100644 index 00000000000..bc78c87bd5f --- /dev/null +++ b/engine/access/rest/experimental/models/model_scheduled_transaction_status.go @@ -0,0 +1,20 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +// ScheduledTransactionStatus : The current lifecycle status of a scheduled transaction. 
+type ScheduledTransactionStatus string + +// List of ScheduledTransactionStatus +const ( + SCHEDULED_ScheduledTransactionStatus ScheduledTransactionStatus = "scheduled" + EXECUTED_ScheduledTransactionStatus ScheduledTransactionStatus = "executed" + CANCELLED_ScheduledTransactionStatus ScheduledTransactionStatus = "cancelled" + FAILED_ScheduledTransactionStatus ScheduledTransactionStatus = "failed" +) diff --git a/engine/access/rest/experimental/models/model_scheduled_transactions_response.go b/engine/access/rest/experimental/models/model_scheduled_transactions_response.go new file mode 100644 index 00000000000..9077f2e4655 --- /dev/null +++ b/engine/access/rest/experimental/models/model_scheduled_transactions_response.go @@ -0,0 +1,14 @@ +/* + * Flow Experimental API + * + * Experimental API endpoints for the Flow Access Node. These endpoints are subject to change without notice. Endpoints may be moved to a permanent API once they are stable. + * + * API version: 0.1.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type ScheduledTransactionsResponse struct { + ScheduledTransactions []ScheduledTransaction `json:"scheduled_transactions"` + NextCursor string `json:"next_cursor,omitempty"` +} diff --git a/engine/access/rest/experimental/models/scheduled_transaction.go b/engine/access/rest/experimental/models/scheduled_transaction.go new file mode 100644 index 00000000000..68830e42c38 --- /dev/null +++ b/engine/access/rest/experimental/models/scheduled_transaction.go @@ -0,0 +1,105 @@ +package models + +import ( + "strconv" + + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +const ( + expandableTransaction = "transaction" + expandableResult = "result" + expandableHandlerContract = "handler_contract" +) + +// Build populates a [ScheduledTransaction] from a domain model. 
+func (t *ScheduledTransaction) Build( + tx *accessmodel.ScheduledTransaction, + link commonmodels.LinkGenerator, + expand map[string]bool, +) { + t.Id = strconv.FormatUint(tx.ID, 10) + var priority ScheduledTransactionPriority + priority.Build(tx.Priority) + t.Priority = &priority + var status ScheduledTransactionStatus + status.Build(tx.Status) + t.Status = &status + t.Timestamp = strconv.FormatUint(tx.Timestamp, 10) + t.ExecutionEffort = strconv.FormatUint(tx.ExecutionEffort, 10) + t.Fees = strconv.FormatUint(tx.Fees, 10) + t.TransactionHandlerOwner = tx.TransactionHandlerOwner.String() + t.TransactionHandlerTypeIdentifier = tx.TransactionHandlerTypeIdentifier + t.TransactionHandlerUuid = strconv.FormatUint(tx.TransactionHandlerUUID, 10) + t.TransactionHandlerPublicPath = tx.TransactionHandlerPublicPath + + if tx.FeesReturned > 0 { + t.FeesReturned = strconv.FormatUint(tx.FeesReturned, 10) + } + if tx.FeesDeducted > 0 { + t.FeesDeducted = strconv.FormatUint(tx.FeesDeducted, 10) + } + if tx.CreatedTransactionID != flow.ZeroID { + t.CreatedTransactionId = tx.CreatedTransactionID.String() + } + if tx.ExecutedTransactionID != flow.ZeroID { + t.ExecutedTransactionId = tx.ExecutedTransactionID.String() + } + if tx.CancelledTransactionID != flow.ZeroID { + t.CancelledTransactionId = tx.CancelledTransactionID.String() + } + + t.Expandable = new(ScheduledTransactionExpandable) + + if expand[expandableTransaction] && tx.Transaction != nil { + t.Transaction = new(commonmodels.Transaction) + t.Transaction.Build(tx.Transaction, nil, link) + } else { + t.Expandable.Transaction = expandableTransaction + } + + if expand[expandableResult] && tx.Result != nil { + t.Result = new(commonmodels.TransactionResult) + t.Result.Build(tx.Result, tx.ExecutedTransactionID, link) + } else { + t.Expandable.Result = expandableResult + } + + if expand[expandableHandlerContract] && tx.HandlerContract != nil { + t.HandlerContract = new(Contract) + t.HandlerContract.Build(tx.HandlerContract) + } 
else { + t.Expandable.HandlerContract = expandableHandlerContract + } +} + +// Build sets the [ScheduledTransactionStatus] from a domain status value. +func (s *ScheduledTransactionStatus) Build(status accessmodel.ScheduledTransactionStatus) { + switch status { + case accessmodel.ScheduledTxStatusScheduled: + *s = SCHEDULED_ScheduledTransactionStatus + case accessmodel.ScheduledTxStatusExecuted: + *s = EXECUTED_ScheduledTransactionStatus + case accessmodel.ScheduledTxStatusCancelled: + *s = CANCELLED_ScheduledTransactionStatus + case accessmodel.ScheduledTxStatusFailed: + *s = FAILED_ScheduledTransactionStatus + default: + *s = "" + } +} + +// Build sets the [ScheduledTransactionPriority] from a domain priority value. +// The contract encodes priority as: 0 = high, 1 = medium, 2 = low. +func (p *ScheduledTransactionPriority) Build(priority accessmodel.ScheduledTransactionPriority) { + switch priority { + case accessmodel.ScheduledTxPriorityHigh: + *p = HIGH_ScheduledTransactionPriority + case accessmodel.ScheduledTxPriorityMedium: + *p = MEDIUM_ScheduledTransactionPriority + default: + *p = LOW_ScheduledTransactionPriority + } +} diff --git a/engine/access/rest/experimental/request/cursor_scheduled_transactions.go b/engine/access/rest/experimental/request/cursor_scheduled_transactions.go new file mode 100644 index 00000000000..7d2afba67a7 --- /dev/null +++ b/engine/access/rest/experimental/request/cursor_scheduled_transactions.go @@ -0,0 +1,38 @@ +package request + +import ( + "encoding/base64" + "encoding/json" + "fmt" + + accessmodel "github.com/onflow/flow-go/model/access" +) + +// scheduledTxCursor is the JSON shape of a pagination cursor (opaque to clients). +type scheduledTxCursor struct { + ID uint64 `json:"i"` +} + +// parseScheduledTxCursor decodes a base64-encoded JSON cursor string. 
+func parseScheduledTxCursor(raw string) (*accessmodel.ScheduledTransactionCursor, error) { + data, err := base64.RawURLEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf("invalid cursor encoding: %w", err) + } + var c scheduledTxCursor + if err := json.Unmarshal(data, &c); err != nil { + return nil, fmt.Errorf("invalid cursor format: %w", err) + } + return &accessmodel.ScheduledTransactionCursor{ID: c.ID}, nil +} + +// EncodeScheduledTxCursor encodes a cursor as base64 URL-encoded JSON. +// +// All errors indicate the cursor could not be marshaled. +func EncodeScheduledTxCursor(cursor *accessmodel.ScheduledTransactionCursor) (string, error) { + data, err := json.Marshal(scheduledTxCursor{ID: cursor.ID}) + if err != nil { + return "", fmt.Errorf("failed to marshal cursor: %w", err) + } + return base64.RawURLEncoding.EncodeToString(data), nil +} diff --git a/engine/access/rest/experimental/request/get_scheduled_transactions.go b/engine/access/rest/experimental/request/get_scheduled_transactions.go new file mode 100644 index 00000000000..368c193fc07 --- /dev/null +++ b/engine/access/rest/experimental/request/get_scheduled_transactions.go @@ -0,0 +1,172 @@ +package request + +import ( + "fmt" + "strconv" + "strings" + + "github.com/onflow/flow-go/access/backends/extended" + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +// GetScheduledTransactions holds parsed request params for the list endpoints. +type GetScheduledTransactions struct { + Address *flow.Address + Limit uint32 + Cursor *accessmodel.ScheduledTransactionCursor + Filter extended.ScheduledTransactionFilter + ExpandOptions extended.ScheduledTransactionExpandOptions +} + +// NewGetScheduledTransactions parses and validates the HTTP request for GET /scheduled. +// +// All errors indicate an invalid request. 
+func NewGetScheduledTransactions(r *common.Request) (GetScheduledTransactions, error) { + var req GetScheduledTransactions + + if raw := r.GetQueryParam("limit"); raw != "" { + parsed, err := strconv.ParseUint(raw, 10, 32) + if err != nil { + return req, fmt.Errorf("invalid limit: %w", err) + } + req.Limit = uint32(parsed) + } + + if raw := r.GetQueryParam("cursor"); raw != "" { + c, err := parseScheduledTxCursor(raw) + if err != nil { + return req, err + } + req.Cursor = c + } + + if err := parseScheduledTxFilter(r, &req.Filter); err != nil { + return req, err + } + + req.ExpandOptions = parseExpandOptions(r) + + return req, nil +} + +// GetAccountScheduledTransactions holds parsed request params for the account-scoped list endpoint. +type GetAccountScheduledTransactions struct { + GetScheduledTransactions + Address flow.Address +} + +// NewGetScheduledTransactionsByAddress parses GET /scheduled/account/{address}. +// +// All errors indicate an invalid request. +func NewGetScheduledTransactionsByAddress(r *common.Request) (GetAccountScheduledTransactions, error) { + req, err := NewGetScheduledTransactions(r) + if err != nil { + return GetAccountScheduledTransactions{}, err + } + + address, err := parser.ParseAddress(r.GetVar("address"), r.Chain) + if err != nil { + return GetAccountScheduledTransactions{}, err + } + + return GetAccountScheduledTransactions{ + GetScheduledTransactions: req, + Address: address, + }, nil +} + +// GetScheduledTransaction holds parsed request params for the single-transaction endpoint. +type GetScheduledTransaction struct { + ID uint64 + ExpandOptions extended.ScheduledTransactionExpandOptions +} + +// NewGetScheduledTransaction parses GET /scheduled/transaction/{id}. +// +// All errors indicate an invalid request. 
+func NewGetScheduledTransaction(r *common.Request) (GetScheduledTransaction, error) { + var req GetScheduledTransaction + + id, err := strconv.ParseUint(r.GetVar("id"), 10, 64) + if err != nil { + return req, fmt.Errorf("invalid scheduled transaction ID: %w", err) + } + req.ID = id + req.ExpandOptions = parseExpandOptions(r) + + return req, nil +} + +// parseScheduledTxFilter parses all optional filter query params from r into filter. +// +// All errors indicate an invalid request. +func parseScheduledTxFilter(r *common.Request, filter *extended.ScheduledTransactionFilter) error { + if raw := r.GetQueryParam("status"); raw != "" { + rawStatuses := strings.Split(raw, ",") + for _, rawStatus := range rawStatuses { + s, err := accessmodel.ParseScheduledTransactionStatus(rawStatus) + if err != nil { + return fmt.Errorf("invalid status: %w", err) + } + filter.Statuses = append(filter.Statuses, s) + } + } + + if raw := r.GetQueryParam("priority"); raw != "" { + p, err := accessmodel.ParseScheduledTransactionPriority(raw) + if err != nil { + return fmt.Errorf("invalid priority: %w", err) + } + filter.Priority = &p + } + + if raw := r.GetQueryParam("start_time"); raw != "" { + v, err := strconv.ParseUint(raw, 10, 64) + if err != nil { + return fmt.Errorf("invalid start_time: %w", err) + } + filter.StartTime = &v + } + + if raw := r.GetQueryParam("end_time"); raw != "" { + v, err := strconv.ParseUint(raw, 10, 64) + if err != nil { + return fmt.Errorf("invalid end_time: %w", err) + } + filter.EndTime = &v + } + + if raw := r.GetQueryParam("handler_owner"); raw != "" { + addr, err := parser.ParseAddress(raw, r.Chain) + if err != nil { + return fmt.Errorf("invalid handler_owner: %w", err) + } + filter.TransactionHandlerOwner = &addr + } + + if raw := r.GetQueryParam("handler_type_id"); raw != "" { + filter.TransactionHandlerTypeID = &raw + } + + if raw := r.GetQueryParam("handler_uuid"); raw != "" { + v, err := strconv.ParseUint(raw, 10, 64) + if err != nil { + return 
fmt.Errorf("invalid handler_uuid: %w", err) + } + filter.TransactionHandlerUUID = &v + } + + return nil +} + +// parseExpandOptions parses the expand options from the request. +func parseExpandOptions(r *common.Request) extended.ScheduledTransactionExpandOptions { + return extended.ScheduledTransactionExpandOptions{ + Transaction: r.Expands("transaction"), + Result: r.Expands("result"), + HandlerContract: r.Expands("handler_contract"), + } +} diff --git a/engine/access/rest/experimental/routes/scheduled_transactions.go b/engine/access/rest/experimental/routes/scheduled_transactions.go new file mode 100644 index 00000000000..0a485239122 --- /dev/null +++ b/engine/access/rest/experimental/routes/scheduled_transactions.go @@ -0,0 +1,108 @@ +package routes + +import ( + "net/http" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access/backends/extended" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/experimental/models" + "github.com/onflow/flow-go/engine/access/rest/experimental/request" + accessmodel "github.com/onflow/flow-go/model/access" +) + +// GetScheduledTransactions handles GET /scheduled. +func GetScheduledTransactions(r *common.Request, backend extended.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.NewGetScheduledTransactions(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + page, err := backend.GetScheduledTransactions( + r.Context(), + req.Limit, + req.Cursor, + req.Filter, + req.ExpandOptions, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + return buildScheduledTransactionsResponse(page, link, r.ExpandFields) +} + +// GetScheduledTransaction handles GET /scheduled/transaction/{id}. 
+func GetScheduledTransaction(r *common.Request, backend extended.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.NewGetScheduledTransaction(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + tx, err := backend.GetScheduledTransaction( + r.Context(), + req.ID, + req.ExpandOptions, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + var m models.ScheduledTransaction + m.Build(tx, link, r.ExpandFields) + return m, nil +} + +// GetScheduledTransactionsByAddress handles GET /scheduled/account/{address}. +func GetScheduledTransactionsByAddress(r *common.Request, backend extended.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.NewGetScheduledTransactionsByAddress(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + page, err := backend.GetScheduledTransactionsByAddress( + r.Context(), + req.Address, + req.Limit, + req.Cursor, + req.Filter, + req.ExpandOptions, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + return buildScheduledTransactionsResponse(page, link, r.ExpandFields) +} + +// buildScheduledTransactionsResponse converts a [accessmodel.ScheduledTransactionsPage] to a REST +// response, encoding the next cursor if present. 
+func buildScheduledTransactionsResponse( + page *accessmodel.ScheduledTransactionsPage, + link commonmodels.LinkGenerator, + expandMap map[string]bool, +) (models.ScheduledTransactionsResponse, error) { + scheduledTransactions := make([]models.ScheduledTransaction, len(page.Transactions)) + for i := range page.Transactions { + scheduledTransactions[i].Build(&page.Transactions[i], link, expandMap) + } + + var nextCursor string + if page.NextCursor != nil { + var err error + nextCursor, err = request.EncodeScheduledTxCursor(page.NextCursor) + if err != nil { + return models.ScheduledTransactionsResponse{}, common.NewRestError(http.StatusInternalServerError, "failed to encode next cursor", err) + } + } + + return models.ScheduledTransactionsResponse{ + ScheduledTransactions: scheduledTransactions, + NextCursor: nextCursor, + }, nil +} diff --git a/engine/access/rest/experimental/routes/scheduled_transactions_test.go b/engine/access/rest/experimental/routes/scheduled_transactions_test.go new file mode 100644 index 00000000000..97af86e85b1 --- /dev/null +++ b/engine/access/rest/experimental/routes/scheduled_transactions_test.go @@ -0,0 +1,671 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access/backends/extended" + extendedmock "github.com/onflow/flow-go/access/backends/extended/mock" + "github.com/onflow/flow-go/engine/access/rest/experimental/request" + "github.com/onflow/flow-go/engine/access/rest/router" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/utils/unittest" +) + +type scheduledTxURLParams struct { + limit string + cursor string + status string + expand string +} + +func scheduledTxsURL(t *testing.T, params 
scheduledTxURLParams) string { + u, err := url.ParseRequestURI("/experimental/v1/scheduled") + require.NoError(t, err) + q := u.Query() + if params.limit != "" { + q.Add("limit", params.limit) + } + if params.cursor != "" { + q.Add("cursor", params.cursor) + } + if params.status != "" { + q.Add("status", params.status) + } + if params.expand != "" { + q.Add("expand", params.expand) + } + u.RawQuery = q.Encode() + return u.String() +} + +func scheduledTxByIDURL(t *testing.T, id uint64, params scheduledTxURLParams) string { + u, err := url.ParseRequestURI(fmt.Sprintf("/experimental/v1/scheduled/transaction/%d", id)) + require.NoError(t, err) + if params.expand != "" { + q := u.Query() + q.Add("expand", params.expand) + u.RawQuery = q.Encode() + } + return u.String() +} + +func scheduledTxsByAddrURL(t *testing.T, address string, params scheduledTxURLParams) string { + u, err := url.ParseRequestURI(fmt.Sprintf("/experimental/v1/scheduled/account/%s", address)) + require.NoError(t, err) + q := u.Query() + if params.limit != "" { + q.Add("limit", params.limit) + } + if params.cursor != "" { + q.Add("cursor", params.cursor) + } + if params.status != "" { + q.Add("status", params.status) + } + if params.expand != "" { + q.Add("expand", params.expand) + } + u.RawQuery = q.Encode() + return u.String() +} + +// testEncodeScheduledTxCursor encodes a cursor the same way the handler does, for use in +// test assertions and inputs. 
+func testEncodeScheduledTxCursor(t *testing.T, id uint64) string { + data, err := request.EncodeScheduledTxCursor(&accessmodel.ScheduledTransactionCursor{ID: id}) + require.NoError(t, err) + return data +} + +func TestGetScheduledTransactions(t *testing.T) { + handlerOwner := unittest.AddressFixture() + + tx1CreatedID := unittest.IdentifierFixture() + tx1 := accessmodel.ScheduledTransaction{ + ID: 100, + Priority: 0, // high + Timestamp: 1000000, + ExecutionEffort: 500, + Fees: 250, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.0000.MyScheduler.Handler", + TransactionHandlerUUID: 7, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: tx1CreatedID, + } + tx2CreatedID := unittest.IdentifierFixture() + tx2ExecutedID := unittest.IdentifierFixture() + tx2 := accessmodel.ScheduledTransaction{ + ID: 99, + Priority: 1, // medium + Timestamp: 999000, + ExecutionEffort: 200, + Fees: 100, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.0000.MyScheduler.Handler", + TransactionHandlerUUID: 8, + Status: accessmodel.ScheduledTxStatusExecuted, + CreatedTransactionID: tx2CreatedID, + ExecutedTransactionID: tx2ExecutedID, + } + + t.Run("happy path with next cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx1, tx2}, + NextCursor: &accessmodel.ScheduledTransactionCursor{ID: 99}, + } + + backend.On("GetScheduledTransactions", + mocktestify.Anything, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, 
rr.Code) + + expectedNextCursor := testEncodeScheduledTxCursor(t, 99) + expected := fmt.Sprintf(`{ + "scheduled_transactions": [ + { + "id": "100", + "status": "scheduled", + "priority": "high", + "timestamp": "1000000", + "execution_effort": "500", + "fees": "250", + "transaction_handler_owner": "%s", + "transaction_handler_type_identifier": "A.0000.MyScheduler.Handler", + "transaction_handler_uuid": "7", + "created_transaction_id": "%s", + "_expandable": { + "transaction": "transaction", + "result": "result", + "handler_contract": "handler_contract" + } + }, + { + "id": "99", + "status": "executed", + "priority": "medium", + "timestamp": "999000", + "execution_effort": "200", + "fees": "100", + "transaction_handler_owner": "%s", + "transaction_handler_type_identifier": "A.0000.MyScheduler.Handler", + "transaction_handler_uuid": "8", + "created_transaction_id": "%s", + "executed_transaction_id": "%s", + "_expandable": { + "transaction": "transaction", + "result": "result", + "handler_contract": "handler_contract" + } + } + ], + "next_cursor": "%s" + }`, handlerOwner.String(), tx1CreatedID.String(), handlerOwner.String(), tx2CreatedID.String(), tx2ExecutedID.String(), expectedNextCursor) + + assert.JSONEq(t, expected, rr.Body.String()) + }) + + t.Run("last page without next cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx1}, + } + + backend.On("GetScheduledTransactions", + mocktestify.Anything, + uint32(10), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{limit: "10"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, rr.Code) + 
assert.NotContains(t, rr.Body.String(), "next_cursor") + }) + + t.Run("with cursor parameter", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx2}, + } + + backend.On("GetScheduledTransactions", + mocktestify.Anything, + uint32(0), + &accessmodel.ScheduledTransactionCursor{ID: 100}, + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{ + cursor: testEncodeScheduledTxCursor(t, 100), + }), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + assert.Equal(t, http.StatusOK, rr.Code) + }) + + t.Run("with status filter", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{}, + } + + backend.On("GetScheduledTransactions", + mocktestify.Anything, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{ + Statuses: []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusScheduled}, + }, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{status: "scheduled"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + assert.Equal(t, http.StatusOK, rr.Code) + }) + + t.Run("invalid limit", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{limit: "abc"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + 
assert.Contains(t, rr.Body.String(), "invalid limit") + }) + + t.Run("invalid cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{cursor: "!notbase64!"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid cursor encoding") + }) + + t.Run("invalid status", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{status: "unknown"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid status") + }) + + t.Run("backend returns failed precondition", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + backend.On("GetScheduledTransactions", + mocktestify.Anything, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(nil, status.Errorf(codes.FailedPrecondition, "index not initialized")) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsURL(t, scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "Precondition failed") + }) +} + +func TestGetScheduledTransaction(t *testing.T) { + handlerOwner := unittest.AddressFixture() + + txCreatedID := unittest.IdentifierFixture() + tx := &accessmodel.ScheduledTransaction{ + ID: 42, + Priority: 0, // high + Timestamp: 2000000, + ExecutionEffort: 750, + Fees: 300, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: 
"A.0000.MyScheduler.Handler", + TransactionHandlerUUID: 3, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: txCreatedID, + } + + t.Run("happy path", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + backend.On("GetScheduledTransaction", + mocktestify.Anything, + uint64(42), + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(tx, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxByIDURL(t, 42, scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, rr.Code) + + expected := fmt.Sprintf(`{ + "id": "42", + "status": "scheduled", + "priority": "high", + "timestamp": "2000000", + "execution_effort": "750", + "fees": "300", + "transaction_handler_owner": "%s", + "transaction_handler_type_identifier": "A.0000.MyScheduler.Handler", + "transaction_handler_uuid": "3", + "created_transaction_id": "%s", + "_expandable": { + "transaction": "transaction", + "result": "result", + "handler_contract": "handler_contract" + } + }`, handlerOwner.String(), txCreatedID.String()) + + assert.JSONEq(t, expected, rr.Body.String()) + }) + + t.Run("invalid ID - non-numeric", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, "/experimental/v1/scheduled/transaction/notanumber", nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid scheduled transaction ID") + }) + + t.Run("with handler_contract expand", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + txWithContract := &accessmodel.ScheduledTransaction{ + ID: 42, + Priority: 0, + Timestamp: 2000000, + ExecutionEffort: 750, + Fees: 300, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.0000.MyScheduler.Handler", + 
TransactionHandlerUUID: 3, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: txCreatedID, + HandlerContract: &accessmodel.Contract{ + Identifier: "A.0000.MyScheduler", + Body: "pub contract MyScheduler {}", + }, + } + + backend.On("GetScheduledTransaction", + mocktestify.Anything, + uint64(42), + extended.ScheduledTransactionExpandOptions{HandlerContract: true}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(txWithContract, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxByIDURL(t, 42, scheduledTxURLParams{expand: "handler_contract"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, rr.Code) + + expected := fmt.Sprintf(`{ + "id": "42", + "status": "scheduled", + "priority": "high", + "timestamp": "2000000", + "execution_effort": "750", + "fees": "300", + "transaction_handler_owner": "%s", + "transaction_handler_type_identifier": "A.0000.MyScheduler.Handler", + "transaction_handler_uuid": "3", + "created_transaction_id": "%s", + "handler_contract": { + "identifier": "A.0000.MyScheduler", + "body": "pub contract MyScheduler {}" + }, + "_expandable": { + "transaction": "transaction", + "result": "result" + } + }`, handlerOwner.String(), txCreatedID.String()) + + assert.JSONEq(t, expected, rr.Body.String()) + }) + + t.Run("backend returns not found", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + backend.On("GetScheduledTransaction", + mocktestify.Anything, + uint64(999), + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(nil, status.Errorf(codes.NotFound, "scheduled transaction 999 not found")) + + req, err := http.NewRequest(http.MethodGet, scheduledTxByIDURL(t, 999, scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusNotFound, rr.Code) + }) +} + +func 
TestGetScheduledTransactionsByAddress(t *testing.T) { + address := unittest.AddressFixture() + handlerOwner := unittest.AddressFixture() + + tx1CreatedID := unittest.IdentifierFixture() + tx1 := accessmodel.ScheduledTransaction{ + ID: 50, + Priority: 0, // high + Timestamp: 5000000, + ExecutionEffort: 300, + Fees: 150, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.0000.MyScheduler.Handler", + TransactionHandlerUUID: 5, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: tx1CreatedID, + } + tx2CreatedID := unittest.IdentifierFixture() + tx2CancelledID := unittest.IdentifierFixture() + tx2 := accessmodel.ScheduledTransaction{ + ID: 49, + Priority: 2, // low + Timestamp: 4500000, + ExecutionEffort: 100, + Fees: 50, + TransactionHandlerOwner: handlerOwner, + TransactionHandlerTypeIdentifier: "A.0000.MyScheduler.Handler", + TransactionHandlerUUID: 6, + Status: accessmodel.ScheduledTxStatusCancelled, + CreatedTransactionID: tx2CreatedID, + CancelledTransactionID: tx2CancelledID, + } + + t.Run("happy path with next cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx1, tx2}, + NextCursor: &accessmodel.ScheduledTransactionCursor{ID: 49}, + } + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, rr.Code) + assert.Contains(t, rr.Body.String(), testEncodeScheduledTxCursor(t, 49)) + }) + + t.Run("last page without next 
cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx1}, + } + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(5), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{limit: "5"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusOK, rr.Code) + assert.NotContains(t, rr.Body.String(), "next_cursor") + }) + + t.Run("with cursor parameter", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx2}, + } + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(0), + &accessmodel.ScheduledTransactionCursor{ID: 50}, + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{ + cursor: testEncodeScheduledTxCursor(t, 50), + }), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + assert.Equal(t, http.StatusOK, rr.Code) + }) + + t.Run("address with 0x prefix", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + page := &accessmodel.ScheduledTransactionsPage{ + Transactions: []accessmodel.ScheduledTransaction{tx1}, + } + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + 
extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(page, nil) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, "0x"+address.String(), scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + assert.Equal(t, http.StatusOK, rr.Code) + }) + + t.Run("invalid address", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, "invalid", scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid address") + }) + + t.Run("invalid cursor", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{cursor: "badcursor"}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid cursor encoding") + }) + + t.Run("backend returns not found", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(nil, status.Errorf(codes.NotFound, "no transactions for address %s", address)) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusNotFound, rr.Code) + }) + + t.Run("backend returns 
failed precondition", func(t *testing.T) { + backend := extendedmock.NewAPI(t) + + backend.On("GetScheduledTransactionsByAddress", + mocktestify.Anything, + address, + uint32(0), + (*accessmodel.ScheduledTransactionCursor)(nil), + extended.ScheduledTransactionFilter{}, + extended.ScheduledTransactionExpandOptions{}, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(nil, status.Errorf(codes.FailedPrecondition, "index not initialized")) + + req, err := http.NewRequest(http.MethodGet, scheduledTxsByAddrURL(t, address.String(), scheduledTxURLParams{}), nil) + require.NoError(t, err) + + rr := router.ExecuteExperimentalRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.Contains(t, rr.Body.String(), "Precondition failed") + }) +} diff --git a/engine/access/rest/router/routes_experimental.go b/engine/access/rest/router/routes_experimental.go index 48acc5f9901..4ccc25fac9a 100644 --- a/engine/access/rest/router/routes_experimental.go +++ b/engine/access/rest/router/routes_experimental.go @@ -31,4 +31,19 @@ var ExperimentalRoutes = []experimentalRoute{{ Pattern: "/accounts/{address}/nft/transfers", Name: "getAccountNonFungibleTokenTransfers", Handler: routes.GetAccountNonFungibleTokenTransfers, +}, { + Method: http.MethodGet, + Pattern: "/scheduled", + Name: "getScheduledTransactions", + Handler: routes.GetScheduledTransactions, +}, { + Method: http.MethodGet, + Pattern: "/scheduled/transaction/{id}", + Name: "getScheduledTransaction", + Handler: routes.GetScheduledTransaction, +}, { + Method: http.MethodGet, + Pattern: "/scheduled/account/{address}", + Name: "getScheduledTransactionsByAddress", + Handler: routes.GetScheduledTransactionsByAddress, }} diff --git a/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go new file mode 100644 index 00000000000..50f8cf200c1 --- /dev/null +++ 
b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go @@ -0,0 +1,253 @@ +package cohort3 + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" +) + +// TestScheduledTransactionLifecycle tests the full lifecycle: +// 1. Schedule a transaction (Scheduled event → status "scheduled") +// 2. Wait for execution (Executed event → status "executed") +// 3. Schedule and cancel another (Canceled event → status "cancelled") +// 4. Verify all data via the REST endpoints and pagination. +func (s *ExtendedIndexingSuite) TestScheduledTransactionLifecycle() { + accessClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + s.Require().NoError(err) + + sc := systemcontracts.SystemContractsForChain(s.net.Root().HeaderBody.ChainID) + + // Deploy the test handler contract. 
+ deployTxID, err := lib.DeployScheduledTransactionsTestContract(accessClient, sc) + s.Require().NoError(err, "could not deploy test handler contract") + + _, err = accessClient.WaitForSealed(context.Background(), deployTxID) + s.Require().NoError(err) + s.T().Log("test handler contract deployed") + + // ---- Schedule tx1 with a near-future timestamp so it executes quickly ---- + nearFutureTimestamp := time.Now().Unix() + 5 + scheduledID1, err := lib.ScheduleTransactionAtTimestamp(nearFutureTimestamp, accessClient, sc) + s.Require().NoError(err) + s.Require().NotZero(scheduledID1) + s.T().Logf("scheduled tx1 with scheduler ID: %d", scheduledID1) + + // ---- Schedule tx2 far in the future, then cancel it ---- + futureTimestamp := time.Now().Unix() + 3600 + scheduledID2, err := lib.ScheduleTransactionAtTimestamp(futureTimestamp, accessClient, sc) + s.Require().NoError(err) + s.Require().NotZero(scheduledID2) + s.T().Logf("scheduled tx2 with scheduler ID: %d", scheduledID2) + + cancelledID, err := lib.CancelTransactionByID(scheduledID2, accessClient, sc) + s.Require().NoError(err) + s.T().Logf("cancelled tx2 (scheduler ID: %d, cancel event ID: %d)", scheduledID2, cancelledID) + + // ---- Wait for the extended indexer to process enough blocks ---- + latestHeader, err := accessClient.GetLatestFinalizedBlockHeader(context.Background()) + s.Require().NoError(err) + + waitCtx, waitCancel := context.WithTimeout(context.Background(), 120*time.Second) + defer waitCancel() + err = accessClient.WaitUntilIndexed(waitCtx, uint64(latestHeader.Height)+5) + s.Require().NoError(err, "extended indexer did not catch up in time") + s.T().Log("extended indexer caught up") + + // ---- Verify tx1 is executed ---- + s.verifyScheduledTxStatus(scheduledID1, "executed") + + // ---- Verify tx2 is cancelled ---- + s.verifyScheduledTxStatus(scheduledID2, "cancelled") + + // ---- Verify the /scheduled endpoint lists both ---- + allTxs := s.fetchAllScheduledTxs(20) + s.T().Logf("found %d scheduled 
transactions in /scheduled", len(allTxs)) + + var foundID1, foundID2 bool + for _, tx := range allTxs { + idStr := fmt.Sprintf("%v", tx["id"]) + if idStr == fmt.Sprintf("%d", scheduledID1) { + foundID1 = true + s.Equal("executed", tx["status"], "tx1 should be executed") + } + if idStr == fmt.Sprintf("%d", scheduledID2) { + foundID2 = true + s.Equal("cancelled", tx["status"], "tx2 should be cancelled") + } + } + s.True(foundID1, "tx1 (executed) should appear in /scheduled") + s.True(foundID2, "tx2 (cancelled) should appear in /scheduled") + + // ---- Verify /scheduled/account/{address} scopes to owner ---- + ownerAddr := flow.Address(accessClient.SDKServiceAddress()).String() + addrTxs := s.fetchAllScheduledTxsByAddress(ownerAddr, 20) + s.T().Logf("found %d scheduled transactions in /scheduled/account/{address}", len(addrTxs)) + + var addrFoundID1, addrFoundID2 bool + for _, tx := range addrTxs { + idStr := fmt.Sprintf("%v", tx["id"]) + if idStr == fmt.Sprintf("%d", scheduledID1) { + addrFoundID1 = true + } + if idStr == fmt.Sprintf("%d", scheduledID2) { + addrFoundID2 = true + } + } + s.True(addrFoundID1, "tx1 should appear in /scheduled/account/{address}") + s.True(addrFoundID2, "tx2 should appear in /scheduled/account/{address}") + + // ---- Verify pagination works via /scheduled with limit=1 ---- + s.verifyScheduledTxPagination() + + // ---- Verify status filter ---- + executedTxs := s.fetchScheduledTxsWithFilter("status=executed") + for _, tx := range executedTxs { + s.Equal("executed", tx["status"], "status filter should only return executed txs") + } + + cancelledTxs := s.fetchScheduledTxsWithFilter("status=cancelled") + for _, tx := range cancelledTxs { + s.Equal("cancelled", tx["status"], "status filter should only return cancelled txs") + } +} + +// verifyScheduledTxStatus polls GET /experimental/v1/scheduled/transaction/{id} until the +// expected status is returned. 
+func (s *ExtendedIndexingSuite) verifyScheduledTxStatus(id uint64, expectedStatus string) { + url := fmt.Sprintf("%s/experimental/v1/scheduled/transaction/%d", s.restBaseURL, id) + require.Eventually(s.T(), func() bool { + tx := s.fetchScheduledTxJSON(url) + if tx == nil { + return false + } + actual, _ := tx["status"].(string) + if actual != expectedStatus { + s.T().Logf("waiting for tx %d status %q, got %q", id, expectedStatus, actual) + return false + } + return true + }, 60*time.Second, 2*time.Second, "tx %d did not reach status %q", id, expectedStatus) +} + +// fetchAllScheduledTxs paginates through GET /experimental/v1/scheduled and returns all results. +func (s *ExtendedIndexingSuite) fetchAllScheduledTxs(pageSize int) []map[string]any { + return s.collectScheduledPages( + fmt.Sprintf("%s/experimental/v1/scheduled?limit=%d", s.restBaseURL, pageSize), + pageSize, + ) +} + +// fetchAllScheduledTxsByAddress paginates through GET /experimental/v1/scheduled/account/{address}. +func (s *ExtendedIndexingSuite) fetchAllScheduledTxsByAddress(address string, pageSize int) []map[string]any { + return s.collectScheduledPages( + fmt.Sprintf("%s/experimental/v1/scheduled/account/%s?limit=%d", s.restBaseURL, address, pageSize), + pageSize, + ) +} + +// fetchScheduledTxsWithFilter fetches /experimental/v1/scheduled with the given query string filter. +func (s *ExtendedIndexingSuite) fetchScheduledTxsWithFilter(filter string) []map[string]any { + url := fmt.Sprintf("%s/experimental/v1/scheduled?limit=100&%s", s.restBaseURL, filter) + body := s.fetchScheduledTxJSONBody(url) + if body == nil { + return nil + } + txs, _ := body["scheduled_transactions"].([]any) + return toMapSlice(txs) +} + +// verifyScheduledTxPagination verifies that paginating through results one at a time yields the +// same total as fetching all at once. 
+func (s *ExtendedIndexingSuite) verifyScheduledTxPagination() { + allAtOnce := s.fetchAllScheduledTxs(100) + allPaged := s.collectScheduledPages( + fmt.Sprintf("%s/experimental/v1/scheduled?limit=1", s.restBaseURL), + 1, + ) + + s.Require().Equal(len(allAtOnce), len(allPaged), + "paginated results should equal unpaginated results") + + for i := range allAtOnce { + s.Equal(allAtOnce[i]["id"], allPaged[i]["id"], + "tx at index %d should have the same ID", i) + } +} + +// collectScheduledPages follows next_cursor links to collect all transactions across all pages. +func (s *ExtendedIndexingSuite) collectScheduledPages(firstURL string, pageSize int) []map[string]any { + var all []map[string]any + url := firstURL + for { + body := s.fetchScheduledTxJSONBody(url) + if body == nil { + break + } + txs, _ := body["scheduled_transactions"].([]any) + all = append(all, toMapSlice(txs)...) + + nextCursor, _ := body["next_cursor"].(string) + if nextCursor == "" { + break + } + url = fmt.Sprintf("%s/experimental/v1/scheduled?limit=%d&cursor=%s", + s.restBaseURL, pageSize, nextCursor) + } + return all +} + +// fetchScheduledTxJSON fetches JSON from the given URL. Returns nil on non-200 or error. +func (s *ExtendedIndexingSuite) fetchScheduledTxJSON(url string) map[string]any { + resp, err := http.Get(url) //nolint:gosec + if err != nil { + return nil + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil + } + var result map[string]any + if err := json.Unmarshal(body, &result); err != nil { + return nil + } + return result +} + +// fetchScheduledTxJSONBody fetches JSON from the given URL with require.Eventually retry logic. 
+func (s *ExtendedIndexingSuite) fetchScheduledTxJSONBody(url string) map[string]any { + var result map[string]any + require.Eventually(s.T(), func() bool { + r := s.fetchScheduledTxJSON(url) + if r == nil { + return false + } + result = r + return true + }, 30*time.Second, 1*time.Second, "REST GET %s should succeed", url) + return result +} + +// toMapSlice converts a []any (from JSON unmarshaling) to []map[string]any. +func toMapSlice(in []any) []map[string]any { + out := make([]map[string]any, 0, len(in)) + for _, item := range in { + if m, ok := item.(map[string]any); ok { + out = append(out, m) + } + } + return out +} diff --git a/integration/tests/access/cohort3/extended_indexing_test.go b/integration/tests/access/cohort3/extended_indexing_test.go index 00d8bc82b59..1415601db0f 100644 --- a/integration/tests/access/cohort3/extended_indexing_test.go +++ b/integration/tests/access/cohort3/extended_indexing_test.go @@ -84,6 +84,12 @@ func (s *ExtendedIndexingSuite) SetupTest() { testnet.WithLogLevel(zerolog.FatalLevel), } + executionConfigs := []func(config *testnet.NodeConfig){ + testnet.WithLogLevel(zerolog.FatalLevel), + // Enable scheduled transaction execution so PendingExecution/Executed events are emitted. + testnet.WithAdditionalFlag("--scheduled-callbacks-enabled=true"), + } + // Access node with execution data sync, execution data indexing, and extended indexing enabled. 
accessNodeOpts := []func(config *testnet.NodeConfig){ testnet.WithLogLevel(zerolog.InfoLevel), @@ -98,8 +104,8 @@ func (s *ExtendedIndexingSuite) SetupTest() { nodeConfigs := []testnet.NodeConfig{ testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, executionConfigs...), + testnet.NewNodeConfig(flow.RoleExecution, executionConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), diff --git a/model/access/contract.go b/model/access/contract.go new file mode 100644 index 00000000000..ebe5ebccfb6 --- /dev/null +++ b/model/access/contract.go @@ -0,0 +1,7 @@ +package access + +// Contract represents a Cadence smart contract as returned by the extended API. +type Contract struct { + Identifier string + Body string +} diff --git a/model/access/scheduled_transaction.go b/model/access/scheduled_transaction.go new file mode 100644 index 00000000000..bdfdd0ce38f --- /dev/null +++ b/model/access/scheduled_transaction.go @@ -0,0 +1,136 @@ +package access + +import ( + "fmt" + "strings" + + "github.com/onflow/flow-go/model/flow" +) + +// ScheduledTransactionStatus represents the lifecycle state of a scheduled transaction. 
+type ScheduledTransactionStatus int8 + +const ( + ScheduledTxStatusScheduled ScheduledTransactionStatus = iota + ScheduledTxStatusExecuted + ScheduledTxStatusCancelled + ScheduledTxStatusFailed +) + +var scheduledTransactionStatusStrings = map[ScheduledTransactionStatus]string{ + ScheduledTxStatusScheduled: "scheduled", + ScheduledTxStatusExecuted: "executed", + ScheduledTxStatusCancelled: "cancelled", + ScheduledTxStatusFailed: "failed", +} + +// String returns the string representation of the status. +func (s ScheduledTransactionStatus) String() string { + if str, ok := scheduledTransactionStatusStrings[s]; ok { + return str + } + panic(fmt.Sprintf("unknown scheduled transaction status: %d", s)) +} + +// ParseScheduledTransactionStatus parses a string into a ScheduledTransactionStatus. +// +// Any error indicates the string is not a valid status. +func ParseScheduledTransactionStatus(s string) (ScheduledTransactionStatus, error) { + switch strings.ToLower(s) { + case scheduledTransactionStatusStrings[ScheduledTxStatusScheduled]: + return ScheduledTxStatusScheduled, nil + case scheduledTransactionStatusStrings[ScheduledTxStatusExecuted]: + return ScheduledTxStatusExecuted, nil + case scheduledTransactionStatusStrings[ScheduledTxStatusCancelled]: + return ScheduledTxStatusCancelled, nil + case scheduledTransactionStatusStrings[ScheduledTxStatusFailed]: + return ScheduledTxStatusFailed, nil + default: + return 0, fmt.Errorf("unknown scheduled transaction status: %s", s) + } +} + +// ScheduledTransactionPriority represents the execution priority of a scheduled transaction. 
+type ScheduledTransactionPriority uint8 + +const ( + ScheduledTxPriorityHigh ScheduledTransactionPriority = 0 + ScheduledTxPriorityMedium ScheduledTransactionPriority = 1 + ScheduledTxPriorityLow ScheduledTransactionPriority = 2 +) + +var scheduledTransactionPriorityStrings = map[ScheduledTransactionPriority]string{ + ScheduledTxPriorityHigh: "high", + ScheduledTxPriorityMedium: "medium", + ScheduledTxPriorityLow: "low", +} + +// String returns the string representation of the priority. +func (p ScheduledTransactionPriority) String() string { + if str, ok := scheduledTransactionPriorityStrings[p]; ok { + return str + } + panic(fmt.Sprintf("unknown scheduled transaction priority: %d", p)) +} + +// ParseScheduledTransactionPriority parses a string into a ScheduledTransactionPriority. +// +// Any error indicates the string is not a valid priority. +func ParseScheduledTransactionPriority(s string) (ScheduledTransactionPriority, error) { + switch strings.ToLower(s) { + case scheduledTransactionPriorityStrings[ScheduledTxPriorityHigh]: + return ScheduledTxPriorityHigh, nil + case scheduledTransactionPriorityStrings[ScheduledTxPriorityMedium]: + return ScheduledTxPriorityMedium, nil + case scheduledTransactionPriorityStrings[ScheduledTxPriorityLow]: + return ScheduledTxPriorityLow, nil + default: + return 0, fmt.Errorf("unknown scheduled transaction priority: %s", s) + } +} + +// ScheduledTransaction represents a scheduled transaction as indexed by the access node. 
+type ScheduledTransaction struct { + ID uint64 + Priority ScheduledTransactionPriority + Timestamp uint64 // stored by the contract as a UFix64 with the fractional zeroed out + ExecutionEffort uint64 + Fees uint64 + + TransactionHandlerOwner flow.Address + TransactionHandlerTypeIdentifier string + TransactionHandlerUUID uint64 + TransactionHandlerPublicPath string + + Status ScheduledTransactionStatus + + CreatedTransactionID flow.Identifier + ExecutedTransactionID flow.Identifier + CancelledTransactionID flow.Identifier + + FeesReturned uint64 + FeesDeducted uint64 + + // IsPlaceholder is true if the scheduled transaction was created based on the current chain state, + // and not based on a protocol event. This happens when the index is bootstrapped after the original + // transaction where the scheduled transaction was first created. + // When true, the `CreatedTransactionID` field is undefined. + IsPlaceholder bool + + // Expansion fields populated when expandResults is true. Never persisted. + Transaction *flow.TransactionBody `msgpack:"-"` // Transaction body (nil unless expanded) + Result *TransactionResult `msgpack:"-"` // Transaction result (nil unless expanded) + HandlerContract *Contract `msgpack:"-"` // Handler contract (nil unless expanded) +} + +// ScheduledTransactionCursor identifies a position in the scheduled transaction index for +// cursor-based pagination. It corresponds to the last entry returned in a previous page. +type ScheduledTransactionCursor struct { + ID uint64 // Scheduled transaction ID of the last returned entry +} + +// ScheduledTransactionsPage represents a single page of scheduled transaction results. 
+type ScheduledTransactionsPage struct { + Transactions []ScheduledTransaction // Results in this page (descending order by ID) + NextCursor *ScheduledTransactionCursor // Cursor to fetch the next page, nil when no more results +} diff --git a/module/state_synchronization/indexer/extended/bootstrap.go b/module/state_synchronization/indexer/extended/bootstrap.go index 13a30f18083..b23231daa61 100644 --- a/module/state_synchronization/indexer/extended/bootstrap.go +++ b/module/state_synchronization/indexer/extended/bootstrap.go @@ -16,6 +16,7 @@ type Storage struct { AccountTransactionsBootstrapper storage.AccountTransactionsBootstrapper FungibleTokenTransfersBootstrapper storage.FungibleTokenTransfersBootstrapper NonFungibleTokenTransfersBootstrapper storage.NonFungibleTokenTransfersBootstrapper + ScheduledTransactionsBootstrapper storage.ScheduledTransactionsIndexBootstrapper } // OpenExtendedIndexDB opens the pebble database for extended indexes and creates the account @@ -62,10 +63,19 @@ func OpenExtendedIndexDB( return Storage{}, fmt.Errorf("could not create non-fungible token transfers index: %w", err) } + scheduledTxStore, err := indexes.NewScheduledTransactionsBootstrapper(indexerStorageDB, sealedRootHeight) + if err != nil { + if closeErr := indexerDB.Close(); closeErr != nil { + log.Error().Err(closeErr).Msg("error closing indexer db") + } + return Storage{}, fmt.Errorf("could not create scheduled transactions index: %w", err) + } + return Storage{ DB: indexerStorageDB, AccountTransactionsBootstrapper: accountTxStore, FungibleTokenTransfersBootstrapper: ftStore, NonFungibleTokenTransfersBootstrapper: nftStore, + ScheduledTransactionsBootstrapper: scheduledTxStore, }, nil } diff --git a/module/state_synchronization/indexer/extended/events/helpers.go b/module/state_synchronization/indexer/extended/events/helpers.go index 0184661f91a..f67f488da1d 100644 --- a/module/state_synchronization/indexer/extended/events/helpers.go +++ 
b/module/state_synchronization/indexer/extended/events/helpers.go @@ -45,6 +45,21 @@ func AddressFromOptional(opt cadence.Optional) (flow.Address, error) { return flow.BytesToAddress(addr.Bytes()), nil } +// PathFromOptional extracts a path string ("domain/identifier") from a [cadence.Optional] +// containing a [cadence.Path]. Returns "" if the optional is empty. +// +// Any error indicates that the optional value is not a valid path. +func PathFromOptional(opt cadence.Optional) (string, error) { + if opt.Value == nil { + return "", nil + } + path, ok := opt.Value.(cadence.Path) + if !ok { + return "", fmt.Errorf("unexpected type in optional path field: %T", opt.Value) + } + return path.String(), nil +} + // HexToEVMAddress decodes a hex string to an EVM address. // This is the same logic as `common.HexToAddress`, except it returns an error if the hex string is // not valid hex or an incorrect length. diff --git a/module/state_synchronization/indexer/extended/events/helpers_test.go b/module/state_synchronization/indexer/extended/events/helpers_test.go index c58cb1e6906..ceb78a404d2 100644 --- a/module/state_synchronization/indexer/extended/events/helpers_test.go +++ b/module/state_synchronization/indexer/extended/events/helpers_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/encoding/ccf" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -42,6 +43,34 @@ func TestAddressFromOptional(t *testing.T) { }) } +// ========================================================================== +// PathFromOptional Tests +// ========================================================================== + +func TestPathFromOptional(t *testing.T) { + t.Run("valid path", func(t *testing.T) { + path := cadence.Path{Domain: common.PathDomainStorage, Identifier: "flowToken"} + opt := cadence.NewOptional(path) + result, err := PathFromOptional(opt) + require.NoError(t, err) + 
assert.Equal(t, path.String(), result) + }) + + t.Run("nil optional value", func(t *testing.T) { + opt := cadence.NewOptional(nil) + result, err := PathFromOptional(opt) + require.NoError(t, err) + assert.Equal(t, "", result) + }) + + t.Run("non-path value in optional returns error", func(t *testing.T) { + opt := cadence.NewOptional(cadence.String("not a path")) + _, err := PathFromOptional(opt) + require.Error(t, err) + assert.Contains(t, err.Error(), "unexpected type") + }) +} + // ========================================================================== // DecodePayload Tests // ========================================================================== diff --git a/module/state_synchronization/indexer/extended/events/scheduled_transaction.go b/module/state_synchronization/indexer/extended/events/scheduled_transaction.go new file mode 100644 index 00000000000..a67e0f224f6 --- /dev/null +++ b/module/state_synchronization/indexer/extended/events/scheduled_transaction.go @@ -0,0 +1,187 @@ +package events + +import ( + "fmt" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/model/flow" +) + +// TransactionSchedulerScheduledEvent represents a decoded FlowTransactionScheduler.Scheduled event, +// emitted when a new scheduled transaction is registered. +type TransactionSchedulerScheduledEvent struct { + ID uint64 + Priority uint8 + Timestamp cadence.UFix64 + ExecutionEffort uint64 + Fees cadence.UFix64 + TransactionHandlerOwner flow.Address + TransactionHandlerTypeIdentifier string + TransactionHandlerUUID uint64 + TransactionHandlerPublicPath string // "domain/identifier", or "" if absent +} + +// TransactionSchedulerPendingExecutionEvent represents a decoded FlowTransactionScheduler.PendingExecution event, +// emitted when a scheduled transaction's timestamp is reached and it is ready for execution. 
+type TransactionSchedulerPendingExecutionEvent struct { + ID uint64 + Priority uint8 + ExecutionEffort uint64 + Fees cadence.UFix64 + TransactionHandlerOwner flow.Address + TransactionHandlerTypeIdentifier string +} + +// TransactionSchedulerExecutedEvent represents a decoded FlowTransactionScheduler.Executed event, +// emitted when a scheduled transaction has been successfully executed. +type TransactionSchedulerExecutedEvent struct { + ID uint64 + Priority uint8 + ExecutionEffort uint64 + TransactionHandlerOwner flow.Address + TransactionHandlerTypeIdentifier string + TransactionHandlerUUID uint64 + TransactionHandlerPublicPath string // "domain/identifier", or "" if absent +} + +// TransactionSchedulerCanceledEvent represents a decoded FlowTransactionScheduler.Canceled event, +// emitted when a scheduled transaction is cancelled by its creator. +type TransactionSchedulerCanceledEvent struct { + ID uint64 + Priority uint8 + FeesReturned cadence.UFix64 + FeesDeducted cadence.UFix64 + TransactionHandlerOwner flow.Address + TransactionHandlerTypeIdentifier string +} + +// DecodeTransactionSchedulerScheduled extracts fields from a FlowTransactionScheduler.Scheduled event. +// +// Any error indicates that the event is malformed. 
+func DecodeTransactionSchedulerScheduled(event cadence.Event) (*TransactionSchedulerScheduledEvent, error) { + type scheduledEventRaw struct { + ID uint64 `cadence:"id"` + Priority uint8 `cadence:"priority"` + Timestamp cadence.UFix64 `cadence:"timestamp"` + ExecutionEffort uint64 `cadence:"executionEffort"` + Fees cadence.UFix64 `cadence:"fees"` + TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` + TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` + TransactionHandlerUUID uint64 `cadence:"transactionHandlerUUID"` + TransactionHandlerPublicPath cadence.Optional `cadence:"transactionHandlerPublicPath"` + } + + var raw scheduledEventRaw + if err := cadence.DecodeFields(event, &raw); err != nil { + return nil, fmt.Errorf("failed to decode Scheduled event: %w", err) + } + + publicPath, err := PathFromOptional(raw.TransactionHandlerPublicPath) + if err != nil { + return nil, fmt.Errorf("failed to decode Scheduled 'transactionHandlerPublicPath' field: %w", err) + } + + return &TransactionSchedulerScheduledEvent{ + ID: raw.ID, + Priority: raw.Priority, + Timestamp: raw.Timestamp, + ExecutionEffort: raw.ExecutionEffort, + Fees: raw.Fees, + TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), + TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, + TransactionHandlerUUID: raw.TransactionHandlerUUID, + TransactionHandlerPublicPath: publicPath, + }, nil +} + +// DecodeTransactionSchedulerPendingExecution extracts fields from a FlowTransactionScheduler.PendingExecution event. +// +// Any error indicates that the event is malformed. 
+func DecodeTransactionSchedulerPendingExecution(event cadence.Event) (*TransactionSchedulerPendingExecutionEvent, error) { + type pendingExecutionEventRaw struct { + ID uint64 `cadence:"id"` + Priority uint8 `cadence:"priority"` + ExecutionEffort uint64 `cadence:"executionEffort"` + Fees cadence.UFix64 `cadence:"fees"` + TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` + TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` + } + + var raw pendingExecutionEventRaw + if err := cadence.DecodeFields(event, &raw); err != nil { + return nil, fmt.Errorf("failed to decode PendingExecution event: %w", err) + } + + return &TransactionSchedulerPendingExecutionEvent{ + ID: raw.ID, + Priority: raw.Priority, + ExecutionEffort: raw.ExecutionEffort, + Fees: raw.Fees, + TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), + TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, + }, nil +} + +// DecodeTransactionSchedulerExecuted extracts fields from a FlowTransactionScheduler.Executed event. +// +// Any error indicates that the event is malformed. 
+func DecodeTransactionSchedulerExecuted(event cadence.Event) (*TransactionSchedulerExecutedEvent, error) { + type executedEventRaw struct { + ID uint64 `cadence:"id"` + Priority uint8 `cadence:"priority"` + ExecutionEffort uint64 `cadence:"executionEffort"` + TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` + TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` + TransactionHandlerUUID uint64 `cadence:"transactionHandlerUUID"` + TransactionHandlerPublicPath cadence.Optional `cadence:"transactionHandlerPublicPath"` + } + + var raw executedEventRaw + if err := cadence.DecodeFields(event, &raw); err != nil { + return nil, fmt.Errorf("failed to decode Executed event: %w", err) + } + + publicPath, err := PathFromOptional(raw.TransactionHandlerPublicPath) + if err != nil { + return nil, fmt.Errorf("failed to decode Executed 'transactionHandlerPublicPath' field: %w", err) + } + + return &TransactionSchedulerExecutedEvent{ + ID: raw.ID, + Priority: raw.Priority, + ExecutionEffort: raw.ExecutionEffort, + TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), + TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, + TransactionHandlerUUID: raw.TransactionHandlerUUID, + TransactionHandlerPublicPath: publicPath, + }, nil +} + +// DecodeTransactionSchedulerCanceled extracts fields from a FlowTransactionScheduler.Canceled event. +// +// Any error indicates that the event is malformed. 
+func DecodeTransactionSchedulerCanceled(event cadence.Event) (*TransactionSchedulerCanceledEvent, error) { + type canceledEventRaw struct { + ID uint64 `cadence:"id"` + Priority uint8 `cadence:"priority"` + FeesReturned cadence.UFix64 `cadence:"feesReturned"` + FeesDeducted cadence.UFix64 `cadence:"feesDeducted"` + TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` + TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` + } + + var raw canceledEventRaw + if err := cadence.DecodeFields(event, &raw); err != nil { + return nil, fmt.Errorf("failed to decode Canceled event: %w", err) + } + + return &TransactionSchedulerCanceledEvent{ + ID: raw.ID, + Priority: raw.Priority, + FeesReturned: raw.FeesReturned, + FeesDeducted: raw.FeesDeducted, + TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), + TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, + }, nil +} diff --git a/module/state_synchronization/indexer/extended/mock/script_executor.go b/module/state_synchronization/indexer/extended/mock/script_executor.go new file mode 100644 index 00000000000..54a92740214 --- /dev/null +++ b/module/state_synchronization/indexer/extended/mock/script_executor.go @@ -0,0 +1,118 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "context" + + mock "github.com/stretchr/testify/mock" +) + +// newScriptExecutor creates a new instance of scriptExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newScriptExecutor(t interface { + mock.TestingT + Cleanup(func()) +}) *scriptExecutor { + mock := &scriptExecutor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// scriptExecutor is an autogenerated mock type for the scriptExecutor type +type scriptExecutor struct { + mock.Mock +} + +type scriptExecutor_Expecter struct { + mock *mock.Mock +} + +func (_m *scriptExecutor) EXPECT() *scriptExecutor_Expecter { + return &scriptExecutor_Expecter{mock: &_m.Mock} +} + +// ExecuteAtBlockHeight provides a mock function for the type scriptExecutor +func (_mock *scriptExecutor) ExecuteAtBlockHeight(ctx context.Context, script []byte, arguments [][]byte, height uint64) ([]byte, error) { + ret := _mock.Called(ctx, script, arguments, height) + + if len(ret) == 0 { + panic("no return value specified for ExecuteAtBlockHeight") + } + + var r0 []byte + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, uint64) ([]byte, error)); ok { + return returnFunc(ctx, script, arguments, height) + } + if returnFunc, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, uint64) []byte); ok { + r0 = returnFunc(ctx, script, arguments, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + if returnFunc, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, uint64) error); ok { + r1 = returnFunc(ctx, script, arguments, height) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// scriptExecutor_ExecuteAtBlockHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteAtBlockHeight' +type scriptExecutor_ExecuteAtBlockHeight_Call struct { + *mock.Call +} + +// ExecuteAtBlockHeight is a helper method to define mock.On call +// - ctx context.Context +// - script []byte +// - arguments [][]byte +// - height uint64 +func (_e *scriptExecutor_Expecter) ExecuteAtBlockHeight(ctx interface{}, script interface{}, arguments interface{}, 
height interface{}) *scriptExecutor_ExecuteAtBlockHeight_Call { + return &scriptExecutor_ExecuteAtBlockHeight_Call{Call: _e.mock.On("ExecuteAtBlockHeight", ctx, script, arguments, height)} +} + +func (_c *scriptExecutor_ExecuteAtBlockHeight_Call) Run(run func(ctx context.Context, script []byte, arguments [][]byte, height uint64)) *scriptExecutor_ExecuteAtBlockHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 context.Context + if args[0] != nil { + arg0 = args[0].(context.Context) + } + var arg1 []byte + if args[1] != nil { + arg1 = args[1].([]byte) + } + var arg2 [][]byte + if args[2] != nil { + arg2 = args[2].([][]byte) + } + var arg3 uint64 + if args[3] != nil { + arg3 = args[3].(uint64) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *scriptExecutor_ExecuteAtBlockHeight_Call) Return(bytes []byte, err error) *scriptExecutor_ExecuteAtBlockHeight_Call { + _c.Call.Return(bytes, err) + return _c +} + +func (_c *scriptExecutor_ExecuteAtBlockHeight_Call) RunAndReturn(run func(ctx context.Context, script []byte, arguments [][]byte, height uint64) ([]byte, error)) *scriptExecutor_ExecuteAtBlockHeight_Call { + _c.Call.Return(run) + return _c +} diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go new file mode 100644 index 00000000000..fd90c8580cc --- /dev/null +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go @@ -0,0 +1,141 @@ +package extended + +import ( + "fmt" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/events" +) + +// getTransactionDataScriptTemplate is a Cadence script template for batch-fetching +// FlowTransactionScheduler.TransactionData by ID. 
The %s placeholder is replaced with +// the FlowTransactionScheduler contract address hex string. +// +// The script accepts a single [UInt64] argument (the scheduled transaction IDs to fetch) +// and returns [FlowTransactionScheduler.TransactionData?], where each element corresponds +// to the input ID in order. Elements are nil for IDs that do not exist. +const getTransactionDataScriptTemplate = ` +import FlowTransactionScheduler from 0x%s + +/// Returns the TransactionData for each of the given scheduled transaction IDs. +/// Returns nil for any ID that does not exist. +access(all) fun main(ids: [UInt64]): [FlowTransactionScheduler.TransactionData?] { + let results: [FlowTransactionScheduler.TransactionData?] = [] + for id in ids { + results.append(FlowTransactionScheduler.getTransactionData(id: id)) + } + return results +} +` + +// EncodeGetTransactionDataArg encodes a slice of scheduled transaction IDs as a +// JSON-CDC [UInt64] array suitable for passing as the script argument when executing +// a script generated from [getTransactionDataScriptTemplate]. +// +// No error returns are expected during normal operation. +func EncodeGetTransactionDataArg(ids []uint64) ([]byte, error) { + values := make([]cadence.Value, len(ids)) + for i, id := range ids { + values[i] = cadence.UInt64(id) + } + encoded, err := jsoncdc.Encode(cadence.NewArray(values)) + if err != nil { + return nil, fmt.Errorf("failed to JSON-CDC encode IDs array: %w", err) + } + return encoded, nil +} + +// DecodeTransactionDataResults decodes the JSON-CDC response from a batch +// GetTransactionData script execution. The ids slice must match the order of IDs +// passed when the script was called. +// +// Returns a map from scheduled transaction ID to decoded [access.ScheduledTransaction]. +// IDs for which the contract returned nil (not found on-chain) are omitted from the map. 
+// The returned entries have [access.ScheduledTxStatusScheduled] status, since +// TransactionData reflects the initially scheduled state. +// +// Any error indicates that the response is malformed. +func DecodeTransactionDataResults(response []byte, ids []uint64) (map[uint64]*access.ScheduledTransaction, error) { + value, err := jsoncdc.Decode(nil, response) + if err != nil { + return nil, fmt.Errorf("failed to JSON-CDC decode script result: %w", err) + } + + array, ok := value.(cadence.Array) + if !ok { + return nil, fmt.Errorf("expected Array result, got %T", value) + } + + if len(array.Values) != len(ids) { + return nil, fmt.Errorf("expected %d results, got %d", len(ids), len(array.Values)) + } + + results := make(map[uint64]*access.ScheduledTransaction, len(ids)) + for i, elem := range array.Values { + opt, ok := elem.(cadence.Optional) + if !ok { + return nil, fmt.Errorf("expected Optional at index %d, got %T", i, elem) + } + if opt.Value == nil { + continue + } + + tx, err := decodeTransactionData(opt.Value) + if err != nil { + return nil, fmt.Errorf("failed to decode TransactionData at index %d (id=%d): %w", i, ids[i], err) + } + results[ids[i]] = &tx + } + + return results, nil +} + +// decodeTransactionData decodes a Cadence FlowTransactionScheduler.TransactionData +// struct value into an [access.ScheduledTransaction]. +// +// Any error indicates that the value is malformed. 
+func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, error) { + type transactionDataRaw struct { + ID uint64 `cadence:"id"` + Priority uint8 `cadence:"priority"` + Timestamp cadence.UFix64 `cadence:"timestamp"` + ExecutionEffort uint64 `cadence:"executionEffort"` + Fees cadence.UFix64 `cadence:"fees"` + TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` + TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` + TransactionHandlerUUID uint64 `cadence:"transactionHandlerUUID"` + TransactionHandlerPublicPath cadence.Optional `cadence:"transactionHandlerPublicPath"` + } + + composite, ok := value.(cadence.Composite) + if !ok { + return access.ScheduledTransaction{}, fmt.Errorf("expected Composite value, got %T", value) + } + + var raw transactionDataRaw + if err := cadence.DecodeFields(composite, &raw); err != nil { + return access.ScheduledTransaction{}, fmt.Errorf("failed to decode TransactionData fields: %w", err) + } + + publicPath, err := events.PathFromOptional(raw.TransactionHandlerPublicPath) + if err != nil { + return access.ScheduledTransaction{}, fmt.Errorf("failed to decode 'transactionHandlerPublicPath' field: %w", err) + } + + return access.ScheduledTransaction{ + ID: raw.ID, + Priority: access.ScheduledTransactionPriority(raw.Priority), + Timestamp: uint64(raw.Timestamp), + ExecutionEffort: raw.ExecutionEffort, + Fees: uint64(raw.Fees), + TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), + TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, + TransactionHandlerUUID: raw.TransactionHandlerUUID, + TransactionHandlerPublicPath: publicPath, + Status: access.ScheduledTxStatusScheduled, + }, nil +} diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go new file mode 100644 index 00000000000..f42a364c913 --- /dev/null +++ 
b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go @@ -0,0 +1,253 @@ +package extended_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + . "github.com/onflow/flow-go/module/state_synchronization/indexer/extended" +) + +// ===== EncodeGetTransactionDataArg ===== + +// TestEncodeGetTransactionDataArg_Empty verifies that encoding an empty slice produces a +// valid JSON-CDC array that can be decoded. +func TestEncodeGetTransactionDataArg_Empty(t *testing.T) { + t.Parallel() + + encoded, err := EncodeGetTransactionDataArg(nil) + require.NoError(t, err) + require.NotEmpty(t, encoded) + + value, err := jsoncdc.Decode(nil, encoded) + require.NoError(t, err) + + arr, ok := value.(cadence.Array) + require.True(t, ok) + assert.Empty(t, arr.Values) +} + +// TestEncodeGetTransactionDataArg_NonEmpty verifies that encoding a non-empty slice produces +// a valid JSON-CDC array with the expected UInt64 values. 
+func TestEncodeGetTransactionDataArg_NonEmpty(t *testing.T) { + t.Parallel() + + ids := []uint64{1, 42, 99} + encoded, err := EncodeGetTransactionDataArg(ids) + require.NoError(t, err) + require.NotEmpty(t, encoded) + + value, err := jsoncdc.Decode(nil, encoded) + require.NoError(t, err) + + arr, ok := value.(cadence.Array) + require.True(t, ok) + require.Len(t, arr.Values, 3) + + assert.Equal(t, cadence.UInt64(1), arr.Values[0]) + assert.Equal(t, cadence.UInt64(42), arr.Values[1]) + assert.Equal(t, cadence.UInt64(99), arr.Values[2]) +} + +// ===== DecodeTransactionDataResults ===== + +// TestDecodeTransactionDataResults_AllFound verifies that when all Optional elements are present +// (non-nil), DecodeTransactionDataResults returns a map with an entry for every ID. +func TestDecodeTransactionDataResults_AllFound(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + + ids := []uint64{5, 7} + comp5 := makeDecodeTransactionDataOptional(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 55) + comp7 := makeDecodeTransactionDataOptional(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler", 77) + + response := encodeOptionalArray(t, comp5, comp7) + + results, err := DecodeTransactionDataResults(response, ids) + require.NoError(t, err) + require.Len(t, results, 2) + + tx5, ok := results[5] + require.True(t, ok) + assert.Equal(t, uint64(5), tx5.ID) + assert.Equal(t, access.ScheduledTxStatusScheduled, tx5.Status) + assert.Equal(t, uint64(55), tx5.TransactionHandlerUUID) + + tx7, ok := results[7] + require.True(t, ok) + assert.Equal(t, uint64(7), tx7.ID) + assert.Equal(t, uint64(77), tx7.TransactionHandlerUUID) +} + +// TestDecodeTransactionDataResults_SomeNil verifies that nil Optional elements are omitted +// from the returned map, while non-nil elements are included. 
+func TestDecodeTransactionDataResults_SomeNil(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + + ids := []uint64{1, 2, 3} + comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1) + nilOpt := cadence.NewOptional(nil) + comp3 := makeDecodeTransactionDataOptional(sc, 3, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 3) + + response := encodeOptionalArray(t, comp1, nilOpt, comp3) + + results, err := DecodeTransactionDataResults(response, ids) + require.NoError(t, err) + require.Len(t, results, 2) + + _, ok := results[2] + assert.False(t, ok, "nil Optional for ID 2 should be omitted") + + _, ok = results[1] + assert.True(t, ok) + _, ok = results[3] + assert.True(t, ok) +} + +// TestDecodeTransactionDataResults_WrongCount verifies that an error is returned when the +// number of results does not match the number of IDs. +func TestDecodeTransactionDataResults_WrongCount(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + + ids := []uint64{1, 2} + // Only one element in the response instead of two. + comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1) + response := encodeOptionalArray(t, comp1) + + _, err := DecodeTransactionDataResults(response, ids) + require.Error(t, err) + assert.Contains(t, err.Error(), "expected 2 results, got 1") +} + +// TestDecodeTransactionDataResults_NonArray verifies that an error is returned when the +// response does not decode to a cadence.Array. +func TestDecodeTransactionDataResults_NonArray(t *testing.T) { + t.Parallel() + + // Encode a single UInt64, not an array. 
+ notArray, err := jsoncdc.Encode(cadence.UInt64(42)) + require.NoError(t, err) + + _, err = DecodeTransactionDataResults(notArray, []uint64{1}) + require.Error(t, err) + assert.Contains(t, err.Error(), "expected Array result") +} + +// TestDecodeTransactionDataResults_NonOptionalElement verifies that an error is returned when +// an array element is not a cadence.Optional. +func TestDecodeTransactionDataResults_NonOptionalElement(t *testing.T) { + t.Parallel() + + // Encode an array with a UInt64 instead of Optional. + notOptional := cadence.UInt64(99) + arr := cadence.NewArray([]cadence.Value{notOptional}) + response, err := jsoncdc.Encode(arr) + require.NoError(t, err) + + _, err = DecodeTransactionDataResults(response, []uint64{1}) + require.Error(t, err) + assert.Contains(t, err.Error(), "expected Optional at index 0") +} + +// TestDecodeTransactionDataResults_MalformedComposite verifies that an error is returned when +// a non-nil Optional contains a value that is not a valid TransactionData composite. +func TestDecodeTransactionDataResults_MalformedComposite(t *testing.T) { + t.Parallel() + + // An Optional wrapping a plain UInt64 (not a Composite). + badOpt := cadence.NewOptional(cadence.UInt64(42)) + arr := cadence.NewArray([]cadence.Value{badOpt}) + response, err := jsoncdc.Encode(arr) + require.NoError(t, err) + + _, err = DecodeTransactionDataResults(response, []uint64{1}) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to decode TransactionData at index 0") +} + +// TestDecodeTransactionDataResults_Empty verifies that an empty IDs slice returns an empty map. 
+func TestDecodeTransactionDataResults_Empty(t *testing.T) { + t.Parallel() + + arr := cadence.NewArray([]cadence.Value{}) + response, err := jsoncdc.Encode(arr) + require.NoError(t, err) + + results, err := DecodeTransactionDataResults(response, []uint64{}) + require.NoError(t, err) + assert.Empty(t, results) +} + +// ===== Test Helpers ===== + +// makeDecodeTransactionDataOptional creates a cadence Optional wrapping a TransactionData +// struct. Used for DecodeTransactionDataResults tests, which expect Optional-wrapped elements. +func makeDecodeTransactionDataOptional( + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + timestamp uint64, + executionEffort uint64, + fees uint64, + owner flow.Address, + typeIdentifier string, + uuid uint64, +) cadence.Value { + addr := common.Address(sc.FlowTransactionScheduler.Address) + loc := common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) + typ := cadence.NewStructType( + loc, + "TransactionData", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "timestamp", Type: cadence.UFix64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, + {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + }, + nil, + ) + comp := cadence.NewStruct([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UFix64(timestamp), + cadence.UInt64(executionEffort), + cadence.UFix64(fees), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + cadence.UInt64(uuid), + cadence.NewOptional(nil), + }).WithType(typ) + return cadence.NewOptional(comp) +} + +// encodeOptionalArray 
encodes a slice of cadence.Value elements as a JSON-CDC array. +func encodeOptionalArray(t *testing.T, elems ...cadence.Value) []byte { + t.Helper() + arr := cadence.NewArray(elems) + encoded, err := jsoncdc.Encode(arr) + require.NoError(t, err) + return encoded +} diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go new file mode 100644 index 00000000000..b46a56f87ce --- /dev/null +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go @@ -0,0 +1,139 @@ +package extended + +import ( + "context" + "fmt" + "slices" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +const maxLookupBatchSize = 50 + +// scriptExecutor is the subset of module/execution.ScriptExecutor used by ScheduledTransactionRequester. +// Defined locally to avoid an import cycle with module/execution. +type scriptExecutor interface { + ExecuteAtBlockHeight(ctx context.Context, script []byte, arguments [][]byte, height uint64) ([]byte, error) +} + +// ScheduledTransactionRequester fetches scheduled transaction data from on-chain state +// by executing Cadence scripts against the FlowTransactionScheduler contract. +// +// Not safe for concurrent use. +type ScheduledTransactionRequester struct { + executor scriptExecutor + script []byte +} + +// NewScheduledTransactionRequester creates a new ScheduledTransactionRequester. 
+func NewScheduledTransactionRequester(executor scriptExecutor, chainID flow.ChainID) *ScheduledTransactionRequester { + return &ScheduledTransactionRequester{ + executor: executor, + script: getTransactionDataScript(chainID), + } +} + +// Fetch fetches scheduled transaction data for the given IDs from on-chain state at lookupHeight, +// and applies the status updates from the collected block data. +// +// No error returns are expected during normal operation. +func (r *ScheduledTransactionRequester) Fetch( + ctx context.Context, + lookupIDs []uint64, + lookupHeight uint64, + data *scheduledTransactionData, +) ([]access.ScheduledTransaction, error) { + missingTxs, err := r.fetchMissingTxs(ctx, lookupIDs, lookupHeight) + if err != nil { + return nil, fmt.Errorf("failed to fetch missing scheduled transactions: %w", err) + } + + updatedTxs := make([]access.ScheduledTransaction, 0, len(missingTxs)) + for _, entry := range data.executedEntries { + if missing, ok := missingTxs[entry.event.ID]; ok { + // set IsPlaceholder = true to signal that some information is missing because we don't know the original transaction. + missing.IsPlaceholder = true + missing.Status = access.ScheduledTxStatusExecuted + missing.ExecutedTransactionID = entry.transactionID + updatedTxs = append(updatedTxs, missing) + } + } + for _, entry := range data.canceledEntries { + if missing, ok := missingTxs[entry.event.ID]; ok { + // set IsPlaceholder = true to signal that some information is missing because we don't know the original transaction. 
+ missing.IsPlaceholder = true + missing.Status = access.ScheduledTxStatusCancelled + missing.CancelledTransactionID = entry.transactionID + missing.FeesReturned = uint64(entry.event.FeesReturned) + missing.FeesDeducted = uint64(entry.event.FeesDeducted) + updatedTxs = append(updatedTxs, missing) + } + } + for _, entry := range data.failedEntries { + if missing, ok := missingTxs[entry.scheduledTxID]; ok { + // set IsPlaceholder = true to signal that some information is missing because we don't know the original transaction. + missing.IsPlaceholder = true + missing.Status = access.ScheduledTxStatusFailed + missing.ExecutedTransactionID = entry.transactionID + updatedTxs = append(updatedTxs, missing) + } + } + + if len(updatedTxs) != len(missingTxs) { + return nil, fmt.Errorf("expected %d updated scheduled transactions, got %d", len(missingTxs), len(updatedTxs)) + } + + return updatedTxs, nil +} + +func (r *ScheduledTransactionRequester) fetchMissingTxs( + ctx context.Context, + lookupIDs []uint64, + height uint64, +) (map[uint64]access.ScheduledTransaction, error) { + missingTxs := make(map[uint64]access.ScheduledTransaction, len(lookupIDs)) + + for batch := range slices.Chunk(lookupIDs, maxLookupBatchSize) { + idsArg, err := EncodeGetTransactionDataArg(batch) + if err != nil { + return nil, fmt.Errorf("failed to build arguments: %w", err) + } + + response, err := r.executor.ExecuteAtBlockHeight(ctx, r.script, [][]byte{idsArg}, height) + if err != nil { + return nil, fmt.Errorf("failed to execute at block height: %w", err) + } + + results, err := jsoncdc.Decode(nil, response) + if err != nil { + return nil, fmt.Errorf("failed to decode scheduled transactions: %w", err) + } + + array, ok := results.(cadence.Array) + if !ok { + return nil, fmt.Errorf("expected Array result, got %T", results) + } + + for _, result := range array.Values { + decoded, err := decodeTransactionData(result) + if err != nil { + return nil, fmt.Errorf("failed to decode scheduled transaction: 
%w", err) + } + missingTxs[decoded.ID] = decoded + } + } + + return missingTxs, nil +} + +// getTransactionDataScript returns the Cadence script used for JIT scheduled transaction +// lookups on the given chain. Exposed for testing. +func getTransactionDataScript(chainID flow.ChainID) []byte { + sc := systemcontracts.SystemContractsForChain(chainID) + return []byte(fmt.Sprintf(getTransactionDataScriptTemplate, sc.FlowTransactionScheduler.Address.Hex())) +} diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go new file mode 100644 index 00000000000..73a0517f4c8 --- /dev/null +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go @@ -0,0 +1,206 @@ +package extended + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + executionmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/events" + "github.com/onflow/flow-go/utils/unittest" +) + +const requesterTestHeight = uint64(200) + +// TestScheduledTransactionRequester_ExecutedEntry verifies that Fetch correctly applies +// Executed status and transaction ID to a fetched scheduled transaction. 
+func TestScheduledTransactionRequester_ExecutedEntry(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + executedTxID := unittest.IdentifierFixture() + comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 99) + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 5), + requesterTestHeight, + ).Return(MakeJITScriptResponse(t, comp), nil).Once() + + data := &scheduledTransactionData{ + executedEntries: []executedEntry{ + { + event: &events.TransactionSchedulerExecutedEvent{ID: 5}, + transactionID: executedTxID, + }, + }, + } + txs, err := requester.Fetch(context.Background(), []uint64{5}, requesterTestHeight, data) + require.NoError(t, err) + require.Len(t, txs, 1) + assert.Equal(t, uint64(5), txs[0].ID) + assert.Equal(t, access.ScheduledTxStatusExecuted, txs[0].Status) + assert.Equal(t, executedTxID, txs[0].ExecutedTransactionID) + assert.Equal(t, uint64(99), txs[0].TransactionHandlerUUID) +} + +// TestScheduledTransactionRequester_CancelledEntry verifies that Fetch correctly applies +// Cancelled status, transaction ID, and fee fields to a fetched scheduled transaction. 
+func TestScheduledTransactionRequester_CancelledEntry(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + cancelTxID := unittest.IdentifierFixture() + comp := MakeTransactionDataComposite(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler", 77) + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 7), + requesterTestHeight, + ).Return(MakeJITScriptResponse(t, comp), nil).Once() + + data := &scheduledTransactionData{ + canceledEntries: []canceledEntry{ + { + event: &events.TransactionSchedulerCanceledEvent{ID: 7, FeesReturned: 50, FeesDeducted: 25}, + transactionID: cancelTxID, + }, + }, + } + txs, err := requester.Fetch(context.Background(), []uint64{7}, requesterTestHeight, data) + require.NoError(t, err) + require.Len(t, txs, 1) + assert.Equal(t, uint64(7), txs[0].ID) + assert.Equal(t, access.ScheduledTxStatusCancelled, txs[0].Status) + assert.Equal(t, cancelTxID, txs[0].CancelledTransactionID) + assert.Equal(t, uint64(50), txs[0].FeesReturned) + assert.Equal(t, uint64(25), txs[0].FeesDeducted) +} + +// TestScheduledTransactionRequester_FailedEntry verifies that Fetch correctly applies +// Failed status and transaction ID to a fetched scheduled transaction. 
+func TestScheduledTransactionRequester_FailedEntry(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + executorTxID := unittest.IdentifierFixture() + comp := MakeTransactionDataComposite(sc, 42, 1, 3000, 200, 80, owner, "A.xyz.Contract.Handler", 15) + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 42), + requesterTestHeight, + ).Return(MakeJITScriptResponse(t, comp), nil).Once() + + data := &scheduledTransactionData{ + failedEntries: []failedEntry{ + {scheduledTxID: 42, transactionID: executorTxID}, + }, + } + txs, err := requester.Fetch(context.Background(), []uint64{42}, requesterTestHeight, data) + require.NoError(t, err) + require.Len(t, txs, 1) + assert.Equal(t, uint64(42), txs[0].ID) + assert.Equal(t, access.ScheduledTxStatusFailed, txs[0].Status) + assert.Equal(t, executorTxID, txs[0].ExecutedTransactionID) +} + +// TestScheduledTransactionRequester_ScriptError verifies that an error from the script +// executor is propagated from Fetch. 
+func TestScheduledTransactionRequester_ScriptError(t *testing.T) { + t.Parallel() + + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + scriptErr := fmt.Errorf("script execution failed") + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 9), + requesterTestHeight, + ).Return([]byte(nil), scriptErr).Once() + + data := &scheduledTransactionData{ + canceledEntries: []canceledEntry{ + {event: &events.TransactionSchedulerCanceledEvent{ID: 9}}, + }, + } + _, err := requester.Fetch(context.Background(), []uint64{9}, requesterTestHeight, data) + require.Error(t, err) + require.ErrorIs(t, err, scriptErr) +} + +// TestScheduledTransactionRequester_Batching verifies that when more than maxLookupBatchSize +// IDs are requested, multiple script calls are made in batches. +func TestScheduledTransactionRequester_Batching(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + // maxLookupBatchSize is 50; use 51 IDs to force 2 batches. 
+ const totalIDs = 51 + + var batch1Composites []cadence.Composite + for i := range 50 { + batch1Composites = append(batch1Composites, MakeTransactionDataComposite(sc, uint64(i+1), 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", uint64(i+1))) + } + batch2Composite := MakeTransactionDataComposite(sc, 51, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 51) + + batch1IDs := make([]uint64, 50) + for i := range 50 { + batch1IDs[i] = uint64(i + 1) + } + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, batch1IDs...), + requesterTestHeight, + ).Return(MakeJITScriptResponse(t, batch1Composites...), nil).Once() + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 51), + requesterTestHeight, + ).Return(MakeJITScriptResponse(t, batch2Composite), nil).Once() + + lookupIDs := make([]uint64, totalIDs) + canceledEntries := make([]canceledEntry, totalIDs) + for i := range totalIDs { + id := uint64(i + 1) + lookupIDs[i] = id + canceledEntries[i] = canceledEntry{ + event: &events.TransactionSchedulerCanceledEvent{ID: id}, + transactionID: unittest.IdentifierFixture(), + } + } + + data := &scheduledTransactionData{canceledEntries: canceledEntries} + txs, err := requester.Fetch(context.Background(), lookupIDs, requesterTestHeight, data) + require.NoError(t, err) + assert.Len(t, txs, totalIDs) +} diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions.go b/module/state_synchronization/indexer/extended/scheduled_transactions.go new file mode 100644 index 00000000000..9ba98f56ffd --- /dev/null +++ b/module/state_synchronization/indexer/extended/scheduled_transactions.go @@ -0,0 +1,398 @@ +package extended + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/cadence" 
+ + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/events" + "github.com/onflow/flow-go/storage" +) + +const scheduledTransactionsIndexerName = "scheduled_transactions" + +// ScheduledTransactions indexes scheduled transaction lifecycle events from the +// FlowTransactionScheduler system contract. +// +// It processes Scheduled, PendingExecution, Executed, and Canceled events and writes +// corresponding entries to the scheduled transactions storage index. +// +// A scheduled transaction that appeared in a PendingExecution event but has no matching +// Executed event in the same block is considered failed. The corresponding Flow transaction +// that was submitted by the scheduled executor account is identified by its authorizer +// (the scheduled executor account) and an empty payer address. +// +// This indexer will automatically backfill any scheduled transactions that are executed or cancelled +// which were scheduled before the indexer was initialized. This is done by executing scripts when an +// unknown transaction is executed or cancelled within a block. There are a couple of important +// considerations to keep in mind: +// 1. If there are many unknown transactions within a block, the script execution may be slow and block +// the indexing process until it completes. Since the extended indexers are run in a batch, this +// will block all other indexers that are indexing the same block. In general, there should be +// relatively few unknown transactions executed. However, if this becomes a problem, we will need +// to consider a more efficient way to backfill the index. +// 2. Since script executions are required to backfill the index, the indexer must be started after +// the registers db is initialized. +// +// Not safe for concurrent use. 
+type ScheduledTransactions struct { + log zerolog.Logger + store storage.ScheduledTransactionsIndexBootstrapper + + scheduledExecutorAddr flow.Address + + scheduledEventType flow.EventType + pendingExecutionType flow.EventType + executedEventType flow.EventType + canceledEventType flow.EventType + + requester *ScheduledTransactionRequester +} + +var _ Indexer = (*ScheduledTransactions)(nil) + +// scheduledTransactionData collects the data for a block's scheduled transactions. +type scheduledTransactionData struct { + newTxs []access.ScheduledTransaction + executedEntries []executedEntry + canceledEntries []canceledEntry + failedEntries []failedEntry +} + +// executedEntry pairs a decoded Executed event with the Flow transaction ID that emitted it. +type executedEntry struct { + event *events.TransactionSchedulerExecutedEvent + transactionID flow.Identifier +} + +// canceledEntry pairs a decoded Canceled event with the Flow transaction ID that emitted it. +type canceledEntry struct { + event *events.TransactionSchedulerCanceledEvent + transactionID flow.Identifier +} + +// failedEntry pairs a scheduled tx ID with the Flow transaction ID of the executor transaction +// that attempted (and failed) to execute the scheduled transaction. +type failedEntry struct { + scheduledTxID uint64 + transactionID flow.Identifier +} + +// NewScheduledTransactions creates a new ScheduledTransactions indexer. +// +// No error returns are expected during normal operation. 
+func NewScheduledTransactions( + log zerolog.Logger, + store storage.ScheduledTransactionsIndexBootstrapper, + scriptExecutor scriptExecutor, + chainID flow.ChainID, +) *ScheduledTransactions { + sc := systemcontracts.SystemContractsForChain(chainID) + scheduler := sc.FlowTransactionScheduler + prefix := fmt.Sprintf("A.%s.%s.", scheduler.Address.Hex(), scheduler.Name) + + return &ScheduledTransactions{ + log: log.With().Str("component", "scheduled_tx_indexer").Logger(), + store: store, + requester: NewScheduledTransactionRequester(scriptExecutor, chainID), + scheduledExecutorAddr: sc.ScheduledTransactionExecutor.Address, + scheduledEventType: flow.EventType(prefix + "Scheduled"), + pendingExecutionType: flow.EventType(prefix + "PendingExecution"), + executedEventType: flow.EventType(prefix + "Executed"), + canceledEventType: flow.EventType(prefix + "Canceled"), + } +} + +// Name returns the indexer name. +func (s *ScheduledTransactions) Name() string { return scheduledTransactionsIndexerName } + +// NextHeight returns the next block height to index. +// +// No error returns are expected during normal operation. +func (s *ScheduledTransactions) NextHeight() (uint64, error) { + return nextHeight(s.store) +} + +// IndexBlockData processes one block's events and transactions, and updates the scheduled +// transactions index. 
+// +// Expected error returns during normal operations: +// - [ErrAlreadyIndexed]: if the data is already indexed for the height +// - [ErrFutureHeight]: if the data is for a future height +func (s *ScheduledTransactions) IndexBlockData(lctx lockctx.Proof, data BlockData, rw storage.ReaderBatchWriter) error { + expectedHeight, err := s.NextHeight() + if err != nil { + return fmt.Errorf("failed to get next height: %w", err) + } + if data.Header.Height > expectedHeight { + return ErrFutureHeight + } + if data.Header.Height < expectedHeight { + return ErrAlreadyIndexed + } + + collected, err := s.collectScheduledTransactionData(data) + if err != nil { + return fmt.Errorf("failed to collect scheduled transaction data: %w", err) + } + + // when a node is bootstrapped after a scheduled transaction was first scheduled, it will not exist + // in the local index. In this case, calls to Executed, Cancelled, and Failed will fail because the + // entry doesn't exist in the db. In practice, this is 100% of nodes since the indexes are reset + // at the beginning of each spork. + // + // The contract doesn't provide a way to query all unexecuted transactions, so we need to find their + // ID first, then query their data. This means it's not possible to backfill the index on startup + // without iterating all possible IDs. + // + // To work around this, the logic that follows performs a just-in-time lookup of the data for each + // unknown transaction that is executed or cancelled within a block. This is one in 3 steps: + // 1. Collect the IDs of all transactions that are not found when attempting to update. + // 2. Execute a script to lookup the data for each ID, and populate the executed/cancelled/failed updates + // 3. Store the updated transactions in the index. 
+ var missingIDs []uint64 + + for _, entry := range collected.executedEntries { + if err := s.store.Executed(lctx, rw, entry.event.ID, entry.transactionID); err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to mark tx %d executed: %w", entry.event.ID, err) + } + missingIDs = append(missingIDs, entry.event.ID) + } + } + for _, entry := range collected.canceledEntries { + if err := s.store.Cancelled(lctx, rw, entry.event.ID, uint64(entry.event.FeesReturned), uint64(entry.event.FeesDeducted), entry.transactionID); err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to mark tx %d cancelled: %w", entry.event.ID, err) + } + missingIDs = append(missingIDs, entry.event.ID) + } + } + for _, entry := range collected.failedEntries { + if err := s.store.Failed(lctx, rw, entry.scheduledTxID, entry.transactionID); err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to mark tx %d failed: %w", entry.scheduledTxID, err) + } + missingIDs = append(missingIDs, entry.scheduledTxID) + } + } + + newTxs := collected.newTxs + if len(missingIDs) > 0 { + // scripts are executed against end of the block state, so the height must be before the current block, + // otherwise the executed/canceled events may not be found. Use one block before the current block. + // This is safe for genesis/spork root blocks because the root block does not execute transactions and + // thus will never have any scheduled transaction events, so the block here will always be after the root block. + missingTxs, err := s.requester.Fetch(context.TODO(), missingIDs, data.Header.Height-1, collected) + if err != nil { + return fmt.Errorf("failed to fetch scheduled transaction data from state: %w", err) + } + + newTxs = append(newTxs, missingTxs...) + } + + // finally store all new transactions in a single call to Store since store may only be called + // once per block. 
+ if err := s.store.Store(lctx, rw, data.Header.Height, newTxs); err != nil { + if !errors.Is(err, storage.ErrAlreadyExists) { + return fmt.Errorf("failed to store new scheduled transactions: %w", err) + } + } + + return nil +} + +// collectScheduledTransactionData collects the scheduled transaction data from the block events. +// +// No error returns are expected during normal operation. +func (s *ScheduledTransactions) collectScheduledTransactionData(data BlockData) (*scheduledTransactionData, error) { + var newTxs []access.ScheduledTransaction + var executedEntries []executedEntry + var canceledEntries []canceledEntry + var failedEntries []failedEntry + + // pendingEventTxIndex is the transaction index of the transaction that emitted the PendingExecution events. + // This is the system transaction that added the scheduled transactions into the system collection. + var pendingEventTxIndex *uint32 + + // pendingIDs tracks the IDs that appear in PendingExecution events so we can match them with Executed events. + // Any missing IDs are considered failed. + pendingIDs := make(map[uint64]struct{}) + + // track which IDs have Scheduled, Canceled, and Executed events to ensure an ID doesn't show up + // more than once in the same block. This should not happen, and the indexer does not handle it. 
+ seenIDs := make(map[uint64]uint32) + checkDuplicate := func(id uint64, eventIndex uint32) error { + if lastID, ok := seenIDs[id]; ok { + return fmt.Errorf("scheduled transaction ID %d appears more than once in block %d (txs %d and %d)", + id, data.Header.Height, lastID, eventIndex) + } + seenIDs[id] = eventIndex + return nil + } + + for _, event := range data.Events { + switch event.Type { + case s.scheduledEventType: + cadenceEvent, err := events.DecodePayload(event) + if err != nil { + return nil, fmt.Errorf("failed to decode Scheduled event payload: %w", err) + } + e, err := events.DecodeTransactionSchedulerScheduled(cadenceEvent) + if err != nil { + return nil, fmt.Errorf("failed to decode Scheduled event: %w", err) + } + + if err := checkDuplicate(e.ID, event.TransactionIndex); err != nil { + return nil, err + } + + newTxs = append(newTxs, access.ScheduledTransaction{ + ID: e.ID, + Priority: access.ScheduledTransactionPriority(e.Priority), + Timestamp: uint64(e.Timestamp), + ExecutionEffort: e.ExecutionEffort, + Fees: uint64(e.Fees), + TransactionHandlerOwner: e.TransactionHandlerOwner, + TransactionHandlerTypeIdentifier: e.TransactionHandlerTypeIdentifier, + TransactionHandlerUUID: e.TransactionHandlerUUID, + TransactionHandlerPublicPath: e.TransactionHandlerPublicPath, + Status: access.ScheduledTxStatusScheduled, + CreatedTransactionID: event.TransactionID, + }) + + case s.pendingExecutionType: + cadenceEvent, err := events.DecodePayload(event) + if err != nil { + return nil, fmt.Errorf("failed to decode PendingExecution event payload: %w", err) + } + e, err := events.DecodeTransactionSchedulerPendingExecution(cadenceEvent) + if err != nil { + return nil, fmt.Errorf("failed to decode PendingExecution event: %w", err) + } + pendingIDs[e.ID] = struct{}{} + if pendingEventTxIndex == nil { + pendingEventTxIndex = &event.TransactionIndex + } + + case s.executedEventType: + cadenceEvent, err := events.DecodePayload(event) + if err != nil { + return nil, 
fmt.Errorf("failed to decode Executed event payload: %w", err) + } + e, err := events.DecodeTransactionSchedulerExecuted(cadenceEvent) + if err != nil { + return nil, fmt.Errorf("failed to decode Executed event: %w", err) + } + + if err := checkDuplicate(e.ID, event.TransactionIndex); err != nil { + return nil, err + } + + executedEntries = append(executedEntries, executedEntry{event: e, transactionID: event.TransactionID}) + + // sanity check: every Executed event must have a corresponding PendingExecution event. + // otherwise, there is a bug in the indexer, or elsewhere in the system. + if _, ok := pendingIDs[e.ID]; !ok { + return nil, fmt.Errorf("Executed event for tx %d has no corresponding PendingExecution in block %d: protocol invariant violated", + e.ID, data.Header.Height) + } + delete(pendingIDs, e.ID) // remove it so we can find failed transactions + + case s.canceledEventType: + cadenceEvent, err := events.DecodePayload(event) + if err != nil { + return nil, fmt.Errorf("failed to decode Canceled event payload: %w", err) + } + e, err := events.DecodeTransactionSchedulerCanceled(cadenceEvent) + if err != nil { + return nil, fmt.Errorf("failed to decode Canceled event: %w", err) + } + + if err := checkDuplicate(e.ID, event.TransactionIndex); err != nil { + return nil, err + } + + canceledEntries = append(canceledEntries, canceledEntry{event: e, transactionID: event.TransactionID}) + } + } + + // Any remaining pendingIDs were scheduled for execution but not executed — they failed. + if len(pendingIDs) > 0 { + // find the transaction that attempted to execute the scheduled transactions, and mark it as failed. + // start searching from the system transaction that adds the scheduled transactions into the + // system collection to reduce overhead. + for _, tx := range data.Transactions[*pendingEventTxIndex:] { + if !s.isExecutorTransaction(tx) { + continue + } + // the executor transaction must have a scheduled tx ID argument. 
+ if len(tx.Arguments) < 1 { + return nil, fmt.Errorf("executor transaction %s has no scheduled tx ID argument", tx.ID()) + } + + id, err := decodeScheduledTxIDArg(tx.Arguments[0]) + if err != nil { + return nil, fmt.Errorf("failed to decode scheduled tx ID from executor transaction: %w", err) + } + if _, ok := pendingIDs[id]; ok { + failedEntries = append(failedEntries, failedEntry{scheduledTxID: id, transactionID: tx.ID()}) + delete(pendingIDs, id) + } + } + + // sanity check: after matching with the actual transaction in the block, pendingIDs should be empty. + // otherwise, there were pending execution events that did not have a corresponding executor transaction. + // this indicates there is a bug in the indexer, or elsewhere in the system. + if len(pendingIDs) > 0 { + ids := make([]string, 0, len(pendingIDs)) + for id := range pendingIDs { + ids = append(ids, strconv.FormatUint(id, 10)) + } + return nil, fmt.Errorf("PendingExecution tx (%s) have no corresponding executor transaction in block %d", + strings.Join(ids, ", "), data.Header.Height) + } + } + + return &scheduledTransactionData{ + newTxs: newTxs, + executedEntries: executedEntries, + canceledEntries: canceledEntries, + failedEntries: failedEntries, + }, nil +} + +// isExecutorTransaction returns true if the transaction was submitted by the scheduled executor +// account: sole authorizer is the scheduled executor address and payer is the empty address. +func (s *ScheduledTransactions) isExecutorTransaction(tx *flow.TransactionBody) bool { + return tx.Payer == flow.EmptyAddress && + len(tx.Authorizers) == 1 && + tx.Authorizers[0] == s.scheduledExecutorAddr +} + +// decodeScheduledTxIDArg decodes a JSON-CDC encoded UInt64 argument as a scheduled tx ID. +// +// Any error indicates a malformed argument. 
func decodeScheduledTxIDArg(arg []byte) (uint64, error) {
	// nil: no memory gauge is passed to the decoder — NOTE(review): confirm metering
	// is not required for arguments of executor transactions.
	value, err := jsoncdc.Decode(nil, arg)
	if err != nil {
		return 0, fmt.Errorf("failed to JSON-CDC decode argument: %w", err)
	}
	id, ok := value.(cadence.UInt64)
	if !ok {
		return 0, fmt.Errorf("expected UInt64 argument, got %T", value)
	}
	return uint64(id), nil
}
diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go
new file mode 100644
index 00000000000..e1d67be2c65
--- /dev/null
+++ b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go
@@ -0,0 +1,1039 @@
package extended_test

import (
	"fmt"
	"os"
	"testing"

	"github.com/jordanschalm/lockctx"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/cadence"
	"github.com/onflow/cadence/common"
	"github.com/onflow/cadence/encoding/ccf"
	jsoncdc "github.com/onflow/cadence/encoding/json"

	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/access"
	"github.com/onflow/flow-go/model/flow"
	executionmock "github.com/onflow/flow-go/module/execution/mock"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/indexes"
	"github.com/onflow/flow-go/storage/indexes/iterator"
	storagemock "github.com/onflow/flow-go/storage/mock"
	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
	"github.com/onflow/flow-go/utils/unittest"

	. "github.com/onflow/flow-go/module/state_synchronization/indexer/extended"
)

// scheduledTestHeight is the first indexed height used by all tests in this file.
const scheduledTestHeight = uint64(100)

// TestScheduledTransactionsIndexer_NoEvents verifies that indexing a block with no scheduler
// events stores an empty slice and advances the height.
func TestScheduledTransactionsIndexer_NoEvents(t *testing.T) {
	t.Parallel()

	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	// no scheduler events in this block
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{},
	})

	latest, err := store.LatestIndexedHeight()
	require.NoError(t, err)
	assert.Equal(t, scheduledTestHeight, latest)

	allIter, err := store.All(nil)
	require.NoError(t, err)
	allTxs, _, err := iterator.CollectResults(allIter, 1000, nil)
	require.NoError(t, err)
	assert.Empty(t, allTxs)
}

// TestScheduledTransactionsIndexer_NextHeight_NotBootstrapped verifies that NextHeight returns
// the configured first height before any blocks have been indexed.
func TestScheduledTransactionsIndexer_NextHeight_NotBootstrapped(t *testing.T) {
	t.Parallel()

	indexer, _, _, _ := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)

	height, err := indexer.NextHeight()
	require.NoError(t, err)
	assert.Equal(t, scheduledTestHeight, height)
}

// TestScheduledTransactionsIndexer_ScheduledEvent verifies that a Scheduled event creates a new
// entry with status Scheduled and all fields correctly parsed, including ScheduledTransactionID.
func TestScheduledTransactionsIndexer_ScheduledEvent(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	owner := unittest.RandomAddressFixture()
	event := createScheduledEvent(t, sc, 1, 3, 1000, 500, 200, owner, "A.1234.SomeContract.Handler", 42, "")
	schedulingTxID := unittest.IdentifierFixture()
	event.TransactionID = schedulingTxID

	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{event},
	})

	tx, err := store.ByID(1)
	require.NoError(t, err)
	assert.Equal(t, uint64(1), tx.ID)
	assert.Equal(t, access.ScheduledTransactionPriority(3), tx.Priority)
	assert.Equal(t, uint64(1000), tx.Timestamp)
	assert.Equal(t, uint64(500), tx.ExecutionEffort)
	assert.Equal(t, uint64(200), tx.Fees)
	assert.Equal(t, owner, tx.TransactionHandlerOwner)
	assert.Equal(t, "A.1234.SomeContract.Handler", tx.TransactionHandlerTypeIdentifier)
	assert.Equal(t, uint64(42), tx.TransactionHandlerUUID)
	assert.Equal(t, "", tx.TransactionHandlerPublicPath)
	assert.Equal(t, access.ScheduledTxStatusScheduled, tx.Status)
	assert.Equal(t, schedulingTxID, tx.CreatedTransactionID)
}

// TestScheduledTransactionsIndexer_ScheduledEventPublicPath verifies that the optional
// transactionHandlerPublicPath field is correctly stored when present.
func TestScheduledTransactionsIndexer_ScheduledEventPublicPath(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	owner := unittest.RandomAddressFixture()
	event := createScheduledEvent(t, sc, 1, 1, 1000, 100, 50, owner, "A.abcd.Contract.Handler", 10, "handlerCapability")

	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{event},
	})

	tx, err := store.ByID(1)
	require.NoError(t, err)
	// the indexer prefixes the capability name with the "public/" domain
	assert.Equal(t, "public/handlerCapability", tx.TransactionHandlerPublicPath)
}

// TestScheduledTransactionsIndexer_ExecutedWithPending verifies that a tx scheduled at height 1
// and then executed at height 2 (with PendingExecution + Executed) is correctly updated to
// Executed status. Execution must occur in a separate block from scheduling because the storage
// layer reads only committed state. Verifies ExecutedTransactionID is set.
func TestScheduledTransactionsIndexer_ExecutedWithPending(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)

	owner := unittest.RandomAddressFixture()

	// Height 1: schedule tx with id=5
	header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
	scheduledEvt := createScheduledEvent(t, sc, 5, 1, 2000, 300, 100, owner, "A.abc.Contract.Handler", 99, "")
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header1,
		Events: []flow.Event{scheduledEvt},
	})

	// Height 2: execute tx with id=5
	executedTxID := unittest.IdentifierFixture()
	header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1))
	pendingEvt := createPendingExecutionEvent(t, sc, 5, 1, 300, 100, owner, "A.abc.Contract.Handler")
	executedEvt := createExecutedEvent(t, sc, 5, 1, 300, owner, "A.abc.Contract.Handler", 99, "")
	executedEvt.TransactionID = executedTxID
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header2,
		Events: []flow.Event{pendingEvt, executedEvt},
	})

	tx, err := store.ByID(5)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTxStatusExecuted, tx.Status)
	assert.Equal(t, uint64(300), tx.ExecutionEffort)
	assert.Equal(t, executedTxID, tx.ExecutedTransactionID)
}

// TestScheduledTransactionsIndexer_CanceledEvent verifies that a Canceled event at height 2
// updates an entry (created at height 1) to Cancelled status with correct fee fields.
// Verifies CancelledTransactionID is set.
func TestScheduledTransactionsIndexer_CanceledEvent(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)

	owner := unittest.RandomAddressFixture()

	// Height 1: schedule tx with id=7
	header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
	scheduledEvt := createScheduledEvent(t, sc, 7, 2, 5000, 400, 150, owner, "A.def.Contract.Handler", 77, "")
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header1,
		Events: []flow.Event{scheduledEvt},
	})

	// Height 2: cancel tx with id=7
	cancelTxID := unittest.IdentifierFixture()
	header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1))
	canceledEvt := createCanceledEvent(t, sc, 7, 2, 100, 50, owner, "A.def.Contract.Handler")
	canceledEvt.TransactionID = cancelTxID
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header2,
		Events: []flow.Event{canceledEvt},
	})

	tx, err := store.ByID(7)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTxStatusCancelled, tx.Status)
	assert.Equal(t, uint64(100), tx.FeesReturned)
	assert.Equal(t, uint64(50), tx.FeesDeducted)
	assert.Equal(t, cancelTxID, tx.CancelledTransactionID)
}

// TestScheduledTransactionsIndexer_FailedTransaction verifies that a scheduled tx with a
// PendingExecution event but no Executed event is marked as Failed when a corresponding
// executor transaction is present in the block. Verifies ExecutedTransactionID is set.
func TestScheduledTransactionsIndexer_FailedTransaction(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)

	owner := unittest.RandomAddressFixture()

	// Height 1: schedule tx with id=42
	header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
	scheduledEvt := createScheduledEvent(t, sc, 42, 1, 3000, 200, 80, owner, "A.xyz.Contract.Handler", 15, "")
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header1,
		Events: []flow.Event{scheduledEvt},
	})

	// Height 2: PendingExecution for tx 42, no Executed event.
	// The executor transaction attempted to execute the scheduled tx but failed.
	header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1))
	pendingEvt := createPendingExecutionEvent(t, sc, 42, 1, 200, 80, owner, "A.xyz.Contract.Handler")
	executorTx := makeExecutorTransactionBody(t, sc.ScheduledTransactionExecutor.Address, 42)
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header:       header2,
		Events:       []flow.Event{pendingEvt},
		Transactions: []*flow.TransactionBody{executorTx},
	})

	tx, err := store.ByID(42)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTxStatusFailed, tx.Status)
	assert.Equal(t, executorTx.ID(), tx.ExecutedTransactionID)
}

// TestScheduledTransactionsIndexer_PendingWithoutExecuted verifies that a PendingExecution event
// without either a matching Executed event or a corresponding executor transaction returns an error.
func TestScheduledTransactionsIndexer_PendingWithoutExecuted(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	owner := unittest.RandomAddressFixture()
	pendingEvt := createPendingExecutionEvent(t, sc, 10, 1, 300, 100, owner, "A.abc.Contract.Handler")

	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{pendingEvt},
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "have no corresponding executor transaction")
}

// TestScheduledTransactionsIndexer_ExecutedWithoutPending verifies that an Executed event without
// a matching PendingExecution event returns an error (protocol invariant violation).
func TestScheduledTransactionsIndexer_ExecutedWithoutPending(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	owner := unittest.RandomAddressFixture()
	executedEvt := createExecutedEvent(t, sc, 11, 1, 300, owner, "A.abc.Contract.Handler", 55, "")

	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{executedEvt},
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "protocol invariant violated")
}

// TestScheduledTransactionsIndexer_DuplicateID verifies that having the same scheduled
// transaction ID appear more than once in a block (across Scheduled, Executed, or Canceled
// events) returns an error.
func TestScheduledTransactionsIndexer_DuplicateID(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	owner := unittest.RandomAddressFixture()

	t.Run("duplicate Scheduled events", func(t *testing.T) {
		t.Parallel()
		indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
		header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

		evt1 := createScheduledEvent(t, sc, 5, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1, "")
		evt2 := createScheduledEvent(t, sc, 5, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 2, "")

		err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
			Header: header,
			Events: []flow.Event{evt1, evt2},
		})
		require.Error(t, err)
		assert.Contains(t, err.Error(), "appears more than once")
	})

	t.Run("duplicate Executed events", func(t *testing.T) {
		t.Parallel()
		indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
		header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

		// One PendingExecution event allows the first Executed to pass the pendingIDs check.
		// The second Executed with the same ID is caught by seenIDs before reaching pendingIDs.
		pendingEvt := createPendingExecutionEvent(t, sc, 5, 1, 100, 50, owner, "A.abc.Contract.Handler")
		executedEvt1 := createExecutedEvent(t, sc, 5, 1, 100, owner, "A.abc.Contract.Handler", 1, "")
		executedEvt2 := createExecutedEvent(t, sc, 5, 1, 100, owner, "A.abc.Contract.Handler", 1, "")

		err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
			Header: header,
			Events: []flow.Event{pendingEvt, executedEvt1, executedEvt2},
		})
		require.Error(t, err)
		assert.Contains(t, err.Error(), "appears more than once")
	})

	t.Run("duplicate Canceled events", func(t *testing.T) {
		t.Parallel()
		indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
		header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

		canceledEvt1 := createCanceledEvent(t, sc, 5, 1, 100, 50, owner, "A.abc.Contract.Handler")
		canceledEvt2 := createCanceledEvent(t, sc, 5, 1, 100, 50, owner, "A.abc.Contract.Handler")

		err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
			Header: header,
			Events: []flow.Event{canceledEvt1, canceledEvt2},
		})
		require.Error(t, err)
		assert.Contains(t, err.Error(), "appears more than once")
	})

	t.Run("Scheduled then Executed in same block", func(t *testing.T) {
		t.Parallel()
		indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
		header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

		scheduledEvt := createScheduledEvent(t, sc, 5, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1, "")
		executedEvt := createExecutedEvent(t, sc, 5, 1, 100, owner, "A.abc.Contract.Handler", 1, "")

		err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
			Header: header,
			Events: []flow.Event{scheduledEvt, executedEvt},
		})
		require.Error(t, err)
		assert.Contains(t, err.Error(), "appears more than once")
	})

	t.Run("Scheduled then Canceled in same block", func(t *testing.T) {
		t.Parallel()
		indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
		header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

		scheduledEvt := createScheduledEvent(t, sc, 5, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1, "")
		canceledEvt := createCanceledEvent(t, sc, 5, 1, 100, 50, owner, "A.abc.Contract.Handler")

		err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
			Header: header,
			Events: []flow.Event{scheduledEvt, canceledEvt},
		})
		require.Error(t, err)
		assert.Contains(t, err.Error(), "appears more than once")
	})
}

// TestScheduledTransactionsIndexer_AlreadyIndexed verifies that indexing the same height twice
// returns ErrAlreadyIndexed.
func TestScheduledTransactionsIndexer_AlreadyIndexed(t *testing.T) {
	t.Parallel()

	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	// First index succeeds
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{},
	})

	// Second index of same height returns ErrAlreadyIndexed
	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{},
	})
	require.ErrorIs(t, err, ErrAlreadyIndexed)
}

// TestScheduledTransactionsIndexer_FutureHeight verifies that indexing a future height (skipping
// heights) returns ErrFutureHeight.
func TestScheduledTransactionsIndexer_FutureHeight(t *testing.T) {
	t.Parallel()

	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+5))

	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{},
	})
	require.ErrorIs(t, err, ErrFutureHeight)
}

// TestScheduledTransactionsIndexer_MultipleScheduledInOneBlock verifies that multiple Scheduled
// events in a single block are all stored with correct fields.
func TestScheduledTransactionsIndexer_MultipleScheduledInOneBlock(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))

	owner := unittest.RandomAddressFixture()
	evt1 := createScheduledEvent(t, sc, 1, 1, 1000, 100, 10, owner, "A.abc.Contract.HandlerA", 1, "")
	evt2 := createScheduledEvent(t, sc, 2, 2, 2000, 200, 20, owner, "A.abc.Contract.HandlerB", 2, "")
	evt3 := createScheduledEvent(t, sc, 3, 3, 3000, 300, 30, owner, "A.abc.Contract.HandlerC", 3, "")

	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header,
		Events: []flow.Event{evt1, evt2, evt3},
	})

	allIter2, err := store.All(nil)
	require.NoError(t, err)
	allTxs2, _, err := iterator.CollectResults(allIter2, 1000, nil)
	require.NoError(t, err)
	require.Len(t, allTxs2, 3)

	tx1, err := store.ByID(1)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTransactionPriority(1), tx1.Priority)

	tx2, err := store.ByID(2)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTransactionPriority(2), tx2.Priority)

	tx3, err := store.ByID(3)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTransactionPriority(3), tx3.Priority)
}

// TestScheduledTransactionsIndexer_MultiplePendingExecuted verifies that multiple scheduled txs
// with PendingExecution and Executed events in the same block are all marked as Executed.
func TestScheduledTransactionsIndexer_MultiplePendingExecuted(t *testing.T) {
	t.Parallel()

	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
	indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)

	owner := unittest.RandomAddressFixture()

	// Height 1: schedule txs 10 and 11
	header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
	evt10 := createScheduledEvent(t, sc, 10, 1, 1000, 100, 10, owner, "A.abc.Contract.Handler", 10, "")
	evt11 := createScheduledEvent(t, sc, 11, 1, 1000, 200, 10, owner, "A.abc.Contract.Handler", 11, "")
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header1,
		Events: []flow.Event{evt10, evt11},
	})

	// Height 2: execute both txs
	header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1))
	pending10 := createPendingExecutionEvent(t, sc, 10, 1, 100, 10, owner, "A.abc.Contract.Handler")
	pending11 := createPendingExecutionEvent(t, sc, 11, 1, 200, 10, owner, "A.abc.Contract.Handler")
	executed10 := createExecutedEvent(t, sc, 10, 1, 100, owner, "A.abc.Contract.Handler", 10, "")
	executed11 := createExecutedEvent(t, sc, 11, 1, 200, owner, "A.abc.Contract.Handler", 11, "")
	indexScheduledBlock(t, indexer, lm, db, BlockData{
		Header: header2,
		Events: []flow.Event{pending10, pending11, executed10, executed11},
	})

	tx10, err := store.ByID(10)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTxStatusExecuted, tx10.Status)

	tx11, err := store.ByID(11)
	require.NoError(t, err)
	assert.Equal(t, access.ScheduledTxStatusExecuted, tx11.Status)
}

//
TestScheduledTransactionsIndexer_MixedFailedAndExecuted verifies that in a block where some +// scheduled txs succeed and others fail, each is correctly marked with the appropriate status. +func TestScheduledTransactionsIndexer_MixedFailedAndExecuted(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight) + + owner := unittest.RandomAddressFixture() + + // Height 1: schedule txs 20 and 21 + header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight)) + evt20 := createScheduledEvent(t, sc, 20, 1, 1000, 100, 10, owner, "A.abc.Contract.Handler", 20, "") + evt21 := createScheduledEvent(t, sc, 21, 1, 1000, 150, 10, owner, "A.abc.Contract.Handler", 21, "") + indexScheduledBlock(t, indexer, lm, db, BlockData{ + Header: header1, + Events: []flow.Event{evt20, evt21}, + }) + + // Height 2: tx 20 succeeds, tx 21 fails (executor tx present, no Executed event) + header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1)) + pending20 := createPendingExecutionEvent(t, sc, 20, 1, 100, 10, owner, "A.abc.Contract.Handler") + pending21 := createPendingExecutionEvent(t, sc, 21, 1, 150, 10, owner, "A.abc.Contract.Handler") + executed20 := createExecutedEvent(t, sc, 20, 1, 100, owner, "A.abc.Contract.Handler", 20, "") + executorTx21 := makeExecutorTransactionBody(t, sc.ScheduledTransactionExecutor.Address, 21) + indexScheduledBlock(t, indexer, lm, db, BlockData{ + Header: header2, + Events: []flow.Event{pending20, pending21, executed20}, + Transactions: []*flow.TransactionBody{executorTx21}, + }) + + tx20, err := store.ByID(20) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusExecuted, tx20.Status) + + tx21, err := store.ByID(21) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusFailed, tx21.Status) + assert.Equal(t, 
executorTx21.ID(), tx21.ExecutedTransactionID) +} + +// TestScheduledTransactionsIndexer_NonExecutorTxSkipped verifies that non-executor transactions +// (wrong payer, wrong authorizer, etc.) before the executor transaction are correctly skipped +// when searching for the executor of a failed scheduled transaction. +func TestScheduledTransactionsIndexer_NonExecutorTxSkipped(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + indexer, store, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight) + + owner := unittest.RandomAddressFixture() + + // Height 1: schedule tx with id=30 + header1 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight)) + scheduledEvt := createScheduledEvent(t, sc, 30, 1, 1000, 100, 10, owner, "A.abc.Contract.Handler", 30, "") + indexScheduledBlock(t, indexer, lm, db, BlockData{ + Header: header1, + Events: []flow.Event{scheduledEvt}, + }) + + // Height 2: PendingExecution for tx 30, a non-executor tx, then the real executor tx. + // The non-executor tx has the wrong payer and should be skipped. 
+ header2 := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1)) + pendingEvt := createPendingExecutionEvent(t, sc, 30, 1, 100, 10, owner, "A.abc.Contract.Handler") + nonExecutorTx := &flow.TransactionBody{ + Payer: unittest.RandomAddressFixture(), // wrong payer + Authorizers: []flow.Address{sc.ScheduledTransactionExecutor.Address}, + } + executorTx := makeExecutorTransactionBody(t, sc.ScheduledTransactionExecutor.Address, 30) + indexScheduledBlock(t, indexer, lm, db, BlockData{ + Header: header2, + Events: []flow.Event{pendingEvt}, + Transactions: []*flow.TransactionBody{nonExecutorTx, executorTx}, + }) + + tx, err := store.ByID(30) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusFailed, tx.Status) + assert.Equal(t, executorTx.ID(), tx.ExecutedTransactionID) +} + +// TestScheduledTransactionsIndexer_ExecutorTxNoArguments verifies that an executor transaction +// with no arguments returns an error rather than silently skipping the failed tx. 
+func TestScheduledTransactionsIndexer_ExecutorTxNoArguments(t *testing.T) {
+	t.Parallel()
+
+	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
+	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
+	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
+
+	owner := unittest.RandomAddressFixture()
+	pendingEvt := createPendingExecutionEvent(t, sc, 50, 1, 100, 10, owner, "A.abc.Contract.Handler")
+	executorTx := &flow.TransactionBody{
+		Payer:       flow.EmptyAddress,
+		Authorizers: []flow.Address{sc.ScheduledTransactionExecutor.Address},
+		Arguments:   nil, // deliberately empty: the indexer must surface an error, not silently skip the tx
+	}
+
+	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
+		Header:       header,
+		Events:       []flow.Event{pendingEvt},
+		Transactions: []*flow.TransactionBody{executorTx},
+	})
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "has no scheduled tx ID argument")
+}
+
+// TestScheduledTransactionsIndexer_ExecutorTxMalformedArg verifies that an executor transaction
+// whose first argument is valid JSON-CDC but of the wrong Cadence type (not UInt64) yields a decode error.
+func TestScheduledTransactionsIndexer_ExecutorTxMalformedArg(t *testing.T) {
+	t.Parallel()
+
+	sc := systemcontracts.SystemContractsForChain(flow.Testnet)
+	indexer, _, lm, db := newScheduledTxIndexerForTest(t, flow.Testnet, scheduledTestHeight)
+	header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight))
+
+	owner := unittest.RandomAddressFixture()
+	pendingEvt := createPendingExecutionEvent(t, sc, 50, 1, 100, 10, owner, "A.abc.Contract.Handler")
+
+	// Valid JSON-CDC encoding but wrong type (String instead of UInt64)
+	malformedArg, encErr := jsoncdc.Encode(cadence.String("not-a-uint64"))
+	require.NoError(t, encErr)
+	executorTx := &flow.TransactionBody{
+		Payer:       flow.EmptyAddress,
+		Authorizers: []flow.Address{sc.ScheduledTransactionExecutor.Address},
+		Arguments:   [][]byte{malformedArg},
+	}
+
+	err := indexScheduledBlockExpectError(t, indexer, lm, db, BlockData{
+		Header:       header,
+		Events:       []flow.Event{pendingEvt},
+		Transactions: []*flow.TransactionBody{executorTx},
+	})
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "failed to decode scheduled tx ID from executor transaction")
+}
+
+// TestScheduledTransactionsIndexer_NextHeight_MockErrors verifies error propagation from the store.
+func TestScheduledTransactionsIndexer_NextHeight_MockErrors(t *testing.T) { + t.Parallel() + + t.Run("unexpected error from LatestIndexedHeight propagates", func(t *testing.T) { + mockStore := storagemock.NewScheduledTransactionsIndexBootstrapper(t) + unexpectedErr := fmt.Errorf("disk I/O failure") + mockStore.On("LatestIndexedHeight").Return(uint64(0), unexpectedErr) + + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + + _, err := indexer.NextHeight() + require.Error(t, err) + require.ErrorIs(t, err, unexpectedErr) + }) + + t.Run("inconsistent state: not bootstrapped but initialized", func(t *testing.T) { + mockStore := storagemock.NewScheduledTransactionsIndexBootstrapper(t) + mockStore.On("LatestIndexedHeight").Return(uint64(0), storage.ErrNotBootstrapped) + mockStore.On("UninitializedFirstHeight").Return(uint64(42), true) + + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + + _, err := indexer.NextHeight() + require.Error(t, err) + assert.Contains(t, err.Error(), "but index is initialized") + }) + + t.Run("store error propagates from IndexBlockData", func(t *testing.T) { + const testHeight = uint64(100) + mockStore := storagemock.NewScheduledTransactionsIndexBootstrapper(t) + // LatestIndexedHeight returns testHeight-1, so NextHeight = testHeight + mockStore.On("LatestIndexedHeight").Return(testHeight-1, nil) + storeErr := fmt.Errorf("unexpected storage error") + mockStore.On("Store", mock.Anything, mock.Anything, testHeight, mock.Anything).Return(storeErr) + + lm := storage.NewTestingLockManager() + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(testHeight)) + + err := unittest.WithLock(t, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return indexer.IndexBlockData(lctx, BlockData{Header: header, Events: []flow.Event{}}, 
nil) + }) + require.Error(t, err) + require.ErrorIs(t, err, storeErr) + }) +} + +// ===== Test Setup Helpers ===== + +// newScheduledTxIndexerForTest creates a ScheduledTransactions indexer backed by a real pebble DB. +func newScheduledTxIndexerForTest( + t *testing.T, + chainID flow.ChainID, + firstHeight uint64, +) (*ScheduledTransactions, storage.ScheduledTransactionsIndexBootstrapper, storage.LockManager, storage.DB) { + pdb, dbDir := unittest.TempPebbleDB(t) + db := pebbleimpl.ToDB(pdb) + t.Cleanup(func() { + require.NoError(t, db.Close()) + require.NoError(t, os.RemoveAll(dbDir)) + }) + + lm := storage.NewTestingLockManager() + store, err := indexes.NewScheduledTransactionsBootstrapper(db, firstHeight) + require.NoError(t, err) + + indexer := NewScheduledTransactions(unittest.Logger(), store, nil, chainID) + return indexer, store, lm, db +} + +// indexScheduledBlock runs IndexBlockData with proper locking and batch commit. +func indexScheduledBlock( + t *testing.T, + indexer *ScheduledTransactions, + lm storage.LockManager, + db storage.DB, + data BlockData, +) { + err := unittest.WithLock(t, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return indexer.IndexBlockData(lctx, data, rw) + }) + }) + require.NoError(t, err) +} + +// indexScheduledBlockExpectError runs IndexBlockData and returns the error. 
+func indexScheduledBlockExpectError( + t *testing.T, + indexer *ScheduledTransactions, + lm storage.LockManager, + db storage.DB, + data BlockData, +) error { + return unittest.WithLock(t, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return indexer.IndexBlockData(lctx, data, rw) + }) + }) +} + +// ===== JIT Lookup Integration Test ===== + +// TestScheduledTransactionsIndexer_JITLookup verifies the end-to-end JIT path: when +// IndexBlockData encounters an unknown transaction (storage returns ErrNotFound), it +// delegates to the requester, and the result is written to storage. +// The script execution details are covered by TestScheduledTransactionRequester_* tests. +func TestScheduledTransactionsIndexer_JITLookup(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + + scriptExecutor := executionmock.NewScriptExecutor(t) + indexer, store, lm, db := newScheduledTxIndexerWithScriptExecutor(t, flow.Testnet, scheduledTestHeight, scriptExecutor) + + // Bootstrap so Executed/Cancelled/Failed return ErrNotFound on the next block, + // triggering the JIT path. 
+ bootstrapHeader := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight)) + indexScheduledBlock(t, indexer, lm, db, BlockData{Header: bootstrapHeader, Events: []flow.Event{}}) + + executedTxID := unittest.IdentifierFixture() + header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(scheduledTestHeight+1)) + pendingEvt := createPendingExecutionEvent(t, sc, 5, 1, 300, 100, owner, "A.abc.Contract.Handler") + executedEvt := createExecutedEvent(t, sc, 5, 1, 300, owner, "A.abc.Contract.Handler", 99, "") + executedEvt.TransactionID = executedTxID + + scriptHeight := header.Height - 1 + comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 99) + scriptExecutor.On("ExecuteAtBlockHeight", + mock.Anything, mock.Anything, mock.Anything, scriptHeight, + ).Return(MakeJITScriptResponse(t, comp), nil).Once() + + indexScheduledBlock(t, indexer, lm, db, BlockData{ + Header: header, + Events: []flow.Event{pendingEvt, executedEvt}, + }) + + tx, err := store.ByID(5) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusExecuted, tx.Status) + assert.Equal(t, executedTxID, tx.ExecutedTransactionID) +} + +// ===== JIT Lookup Helpers ===== + +// newScheduledTxIndexerWithScriptExecutor creates an indexer backed by a real pebble DB +// with the given script executor. 
+func newScheduledTxIndexerWithScriptExecutor(
+	t *testing.T,
+	chainID flow.ChainID,
+	firstHeight uint64,
+	scriptExecutor *executionmock.ScriptExecutor,
+) (*ScheduledTransactions, storage.ScheduledTransactionsIndexBootstrapper, storage.LockManager, storage.DB) {
+	pdb, dbDir := unittest.TempPebbleDB(t)
+	db := pebbleimpl.ToDB(pdb)
+	t.Cleanup(func() {
+		require.NoError(t, db.Close())
+		require.NoError(t, os.RemoveAll(dbDir))
+	})
+
+	lm := storage.NewTestingLockManager()
+	store, err := indexes.NewScheduledTransactionsBootstrapper(db, firstHeight)
+	require.NoError(t, err)
+
+	indexer := NewScheduledTransactions(unittest.Logger(), store, scriptExecutor, chainID) // NOTE(review): differs from newScheduledTxIndexerForTest only by the non-nil executor; consider delegating
+	return indexer, store, lm, db
+}
+
+// makeExecutorTransactionBody creates a transaction body that matches the executor transaction
+// criteria: payer is the zero address, sole authorizer is the executor address, and the first
+// argument is a JSON-CDC encoded UInt64 with the given scheduled tx ID.
+func makeExecutorTransactionBody(t *testing.T, executorAddr flow.Address, scheduledTxID uint64) *flow.TransactionBody {
+	t.Helper()
+	arg, err := jsoncdc.Encode(cadence.UInt64(scheduledTxID))
+	require.NoError(t, err)
+	return &flow.TransactionBody{
+		Payer:       flow.EmptyAddress, // zero-address payer is part of the executor-tx matching criteria
+		Authorizers: []flow.Address{executorAddr},
+		Arguments:   [][]byte{arg},
+	}
+}
+
+// schedulerEventType returns the full event type string for the given event name.
+func schedulerEventType(sc *systemcontracts.SystemContracts, eventName string) flow.EventType {
+	return flow.EventType(fmt.Sprintf("A.%s.%s.%s",
+		sc.FlowTransactionScheduler.Address.Hex(),
+		sc.FlowTransactionScheduler.Name,
+		eventName,
+	))
+}
+
+// schedulerEventLocation returns the Cadence address location for the scheduler contract.
+func schedulerEventLocation(sc *systemcontracts.SystemContracts) common.Location { + addr := common.Address(sc.FlowTransactionScheduler.Address) + return common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) +} + +// createScheduledEvent builds a CCF-encoded Scheduled event for the FlowTransactionScheduler. +// If publicPath is empty, the transactionHandlerPublicPath field is set to nil. +func createScheduledEvent( + t *testing.T, + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + timestamp uint64, + executionEffort uint64, + fees uint64, + owner flow.Address, + typeIdentifier string, + uuid uint64, + publicPath string, +) flow.Event { + t.Helper() + + var publicPathValue cadence.Value + if publicPath != "" { + path := cadence.MustNewPath(common.PathDomainPublic, publicPath) + publicPathValue = cadence.NewOptional(path) + } else { + publicPathValue = cadence.NewOptional(nil) + } + + location := schedulerEventLocation(sc) + eventCadenceType := cadence.NewEventType( + location, + "Scheduled", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "timestamp", Type: cadence.UFix64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, + {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + }, + nil, + ) + + event := cadence.NewEvent([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UFix64(timestamp), + cadence.UInt64(executionEffort), + cadence.UFix64(fees), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + cadence.UInt64(uuid), + publicPathValue, + }).WithType(eventCadenceType) + + 
payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: schedulerEventType(sc, "Scheduled"), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} + +// createPendingExecutionEvent builds a CCF-encoded PendingExecution event. +func createPendingExecutionEvent( + t *testing.T, + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + executionEffort uint64, + fees uint64, + owner flow.Address, + typeIdentifier string, +) flow.Event { + t.Helper() + + location := schedulerEventLocation(sc) + eventCadenceType := cadence.NewEventType( + location, + "PendingExecution", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + }, + nil, + ) + + event := cadence.NewEvent([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UInt64(executionEffort), + cadence.UFix64(fees), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + }).WithType(eventCadenceType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: schedulerEventType(sc, "PendingExecution"), + TransactionIndex: 0, + EventIndex: 1, + Payload: payload, + } +} + +// createExecutedEvent builds a CCF-encoded Executed event. 
+func createExecutedEvent( + t *testing.T, + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + executionEffort uint64, + owner flow.Address, + typeIdentifier string, + uuid uint64, + publicPath string, +) flow.Event { + t.Helper() + + var publicPathValue cadence.Value + if publicPath != "" { + path := cadence.MustNewPath(common.PathDomainPublic, publicPath) + publicPathValue = cadence.NewOptional(path) + } else { + publicPathValue = cadence.NewOptional(nil) + } + + location := schedulerEventLocation(sc) + eventCadenceType := cadence.NewEventType( + location, + "Executed", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, + {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + }, + nil, + ) + + event := cadence.NewEvent([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UInt64(executionEffort), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + cadence.UInt64(uuid), + publicPathValue, + }).WithType(eventCadenceType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: schedulerEventType(sc, "Executed"), + TransactionIndex: 0, + EventIndex: 2, + Payload: payload, + } +} + +// createCanceledEvent builds a CCF-encoded Canceled event. 
+func createCanceledEvent( + t *testing.T, + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + feesReturned uint64, + feesDeducted uint64, + owner flow.Address, + typeIdentifier string, +) flow.Event { + t.Helper() + + location := schedulerEventLocation(sc) + eventCadenceType := cadence.NewEventType( + location, + "Canceled", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "feesReturned", Type: cadence.UFix64Type}, + {Identifier: "feesDeducted", Type: cadence.UFix64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + }, + nil, + ) + + event := cadence.NewEvent([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UFix64(feesReturned), + cadence.UFix64(feesDeducted), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + }).WithType(eventCadenceType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: schedulerEventType(sc, "Canceled"), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} diff --git a/module/state_synchronization/indexer/extended/test_helpers_test.go b/module/state_synchronization/indexer/extended/test_helpers_test.go new file mode 100644 index 00000000000..1ba5129f28a --- /dev/null +++ b/module/state_synchronization/indexer/extended/test_helpers_test.go @@ -0,0 +1,81 @@ +package extended + +import ( + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +// MakeTransactionDataComposite creates a cadence Struct value representing a +// FlowTransactionScheduler.TransactionData with the given fields. 
sc is used to +// derive the correct contract address location required for JSON-CDC encoding. +func MakeTransactionDataComposite( + sc *systemcontracts.SystemContracts, + id uint64, + priority uint8, + timestamp uint64, + executionEffort uint64, + fees uint64, + owner flow.Address, + typeIdentifier string, + uuid uint64, +) cadence.Composite { + addr := common.Address(sc.FlowTransactionScheduler.Address) + loc := common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) + typ := cadence.NewStructType( + loc, + "TransactionData", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "timestamp", Type: cadence.UFix64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, + {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, + {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, + {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + }, + nil, + ) + return cadence.NewStruct([]cadence.Value{ + cadence.UInt64(id), + cadence.UInt8(priority), + cadence.UFix64(timestamp), + cadence.UInt64(executionEffort), + cadence.UFix64(fees), + cadence.NewAddress(owner), + cadence.String(typeIdentifier), + cadence.UInt64(uuid), + cadence.NewOptional(nil), + }).WithType(typ) +} + +// MakeJITScriptResponse encodes a slice of TransactionData composites as a JSON-CDC array, +// the format returned by a getTransactionData script execution. 
+func MakeJITScriptResponse(t *testing.T, composites ...cadence.Composite) []byte {
+	t.Helper()
+	values := make([]cadence.Value, len(composites))
+	for i, c := range composites {
+		values[i] = c
+	}
+	encoded, err := jsoncdc.Encode(cadence.NewArray(values))
+	require.NoError(t, err)
+	return encoded
+}
+
+// encodeUInt64Args encodes all ids into one combined JSON-CDC argument via
+// EncodeGetTransactionDataArg and returns it wrapped in a one-element slice —
+// the same single-argument shape buildArgs passes to ExecuteAtBlockHeight.
+func encodeUInt64Args(t *testing.T, ids ...uint64) [][]byte {
+	t.Helper()
+	args, err := EncodeGetTransactionDataArg(ids)
+	require.NoError(t, err)
+	return [][]byte{args}
+}
diff --git a/storage/account_transactions.go b/storage/account_transactions.go
index cea78b78103..fe34ee5b690 100644
--- a/storage/account_transactions.go
+++ b/storage/account_transactions.go
@@ -45,7 +45,7 @@ type AccountTransactionsRangeReader interface {
 // AccountTransactionsWriter provides write access to the account transaction index.
 //
-// NOT CONCURRENTLY SAFE.
+// NOT CONCURRENCY SAFE.
 type AccountTransactionsWriter interface {
 	// Store indexes all account-transaction associations for a block.
 	// Must be called sequentially with consecutive heights (latestHeight + 1).
diff --git a/storage/account_transfers.go b/storage/account_transfers.go
index 14643ffa323..22c84289b27 100644
--- a/storage/account_transfers.go
+++ b/storage/account_transfers.go
@@ -46,7 +46,7 @@ type FungibleTokenTransfersRangeReader interface {
 // FungibleTokenTransfersWriter provides write access to the fungible token transfer index.
 //
-// NOT CONCURRENTLY SAFE.
+// NOT CONCURRENCY SAFE.
 type FungibleTokenTransfersWriter interface {
 	// Store indexes all fungible token transfers for a block.
 	// Each transfer is indexed under both the source and recipient addresses.
@@ -133,7 +133,7 @@ type NonFungibleTokenTransfersRangeReader interface { // NonFungibleTokenTransfersWriter provides write access to the non-fungible token transfer index. // -// NOT CONCURRENTLY SAFE. +// NOT CONCURRENCY SAFE. type NonFungibleTokenTransfersWriter interface { // Store indexes all non-fungible token transfers for a block. // Each transfer is indexed under both the source and recipient addresses. diff --git a/storage/errors.go b/storage/errors.go index c09de3e90ad..840f50a6bd2 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -34,6 +34,10 @@ var ( // ErrInvalidQuery is returned when parameters passed to a read query are invalid (e.g., startHeight > endHeight). ErrInvalidQuery = errors.New("invalid query") + + // ErrInvalidStatusTransition is returned when a status update is not valid for the + // current state (e.g. executing an already-cancelled scheduled transaction). + ErrInvalidStatusTransition = errors.New("invalid scheduled transaction status transition") ) // InvalidDKGStateTransitionError is a sentinel error that is returned in case an invalid state transition is attempted. 
diff --git a/storage/indexes/prefix.go b/storage/indexes/prefix.go index 2d63e24efc3..3236328d005 100644 --- a/storage/indexes/prefix.go +++ b/storage/indexes/prefix.go @@ -14,6 +14,8 @@ const ( codeAccountTransactions byte = 10 // Account transactions index codeAccountFungibleTokenTransfers byte = 11 // Account fungible token transfers index codeAccountNonFungibleTokenTransfers byte = 12 // Account non-fungible token transfers index + codeScheduledTransaction byte = 13 // Scheduled transaction index + codeScheduledTransactionByAddress byte = 14 // Scheduled transaction by address // reserved as extension byte for future use _ byte = 255 @@ -33,4 +35,8 @@ var ( // Upper and lower bound keys for account non-fungible token transfers keyAccountNFTTransferLatestHeightKey = []byte{codeIndexProcessedHeightUpperBound, codeAccountNonFungibleTokenTransfers} keyAccountNFTTransferFirstHeightKey = []byte{codeIndexProcessedHeightLowerBound, codeAccountNonFungibleTokenTransfers} + + // Upper and lower bound keys for scheduled transactions + keyScheduledTxLatestHeightKey = []byte{codeIndexProcessedHeightUpperBound, codeScheduledTransaction} + keyScheduledTxFirstHeightKey = []byte{codeIndexProcessedHeightLowerBound, codeScheduledTransaction} ) diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go new file mode 100644 index 00000000000..04480a9670f --- /dev/null +++ b/storage/indexes/scheduled_transactions.go @@ -0,0 +1,380 @@ +package indexes + +import ( + "encoding/binary" + "fmt" + "math" + + "github.com/jordanschalm/lockctx" + "github.com/vmihailenco/msgpack/v4" + + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/indexes/iterator" + "github.com/onflow/flow-go/storage/operation" +) + +const ( + // scheduledTxPrimaryKeyLen is [code(1)][~id(8)] = 9 bytes + scheduledTxPrimaryKeyLen = 1 + 8 + // scheduledTxByAddrKeyLen is 
[code(1)][address(8)][~id(8)] = 17 bytes + scheduledTxByAddrKeyLen = 1 + flow.AddressLength + 8 +) + +// ScheduledTransactionsIndex implements [storage.ScheduledTransactionsIndex] using Pebble. +// +// Primary key format: [codeScheduledTransaction][~id] → ScheduledTransaction value +// By-address key format: [codeScheduledTransactionByAddress][addr][~id] → nil (key-only) +// +// One's complement of id (~id) gives descending iteration order in ascending byte space. +// +// All read methods are safe for concurrent access. Write methods must be called sequentially. +type ScheduledTransactionsIndex struct { + *IndexState +} + +var _ storage.ScheduledTransactionsIndex = (*ScheduledTransactionsIndex)(nil) + +// NewScheduledTransactionsIndex creates a new index backed by db. +// +// Expected error returns during normal operation: +// - [storage.ErrNotBootstrapped]: if the index has not been initialized +func NewScheduledTransactionsIndex(db storage.DB) (*ScheduledTransactionsIndex, error) { + state, err := NewIndexState( + db, + storage.LockIndexScheduledTransactionsIndex, + keyScheduledTxFirstHeightKey, + keyScheduledTxLatestHeightKey, + ) + if err != nil { + return nil, fmt.Errorf("could not create index state: %w", err) + } + return &ScheduledTransactionsIndex{IndexState: state}, nil +} + +// BootstrapScheduledTransactions initializes the index with the given start height and initial +// scheduled transactions, and returns a new [ScheduledTransactionsIndex]. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until the batch +// is committed. 
+// +// Expected error returns during normal operation: +// - [storage.ErrAlreadyExists]: if any bounds key already exists +func BootstrapScheduledTransactions( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + db storage.DB, + initialStartHeight uint64, + scheduledTxs []access.ScheduledTransaction, +) (*ScheduledTransactionsIndex, error) { + state, err := BootstrapIndexState( + lctx, + rw, + db, + storage.LockIndexScheduledTransactionsIndex, + keyScheduledTxFirstHeightKey, + keyScheduledTxLatestHeightKey, + initialStartHeight, + ) + if err != nil { + return nil, fmt.Errorf("could not bootstrap scheduled transactions: %w", err) + } + + if err := storeAllScheduledTransactions(rw, scheduledTxs); err != nil { + return nil, fmt.Errorf("could not store scheduled transactions: %w", err) + } + + return &ScheduledTransactionsIndex{IndexState: state}, nil +} + +// ByID returns the scheduled transaction with the given ID. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if no scheduled transaction with the given ID exists +func (idx *ScheduledTransactionsIndex) ByID(id uint64) (access.ScheduledTransaction, error) { + var tx access.ScheduledTransaction + if err := operation.RetrieveByKey(idx.db.Reader(), makeScheduledTxPrimaryKey(id), &tx); err != nil { + return access.ScheduledTransaction{}, fmt.Errorf("could not retrieve scheduled transaction %d: %w", id, err) + } + return tx, nil +} + +// All returns an iterator over all scheduled transactions in descending ID order. +// Returns an exhausted iterator and no error if no transactions exist. +// +// `cursor` is a pointer to an [access.ScheduledTransactionCursor]: +// - nil means start from the highest indexed ID +// - non-nil means start at the cursor ID (inclusive) +// +// No error returns are expected during normal operation. 
+func (idx *ScheduledTransactionsIndex) All( + cursor *access.ScheduledTransactionCursor, +) (storage.ScheduledTransactionIterator, error) { + startKey := makeScheduledTxPrimaryKey(math.MaxUint64) + if cursor != nil { + startKey = makeScheduledTxPrimaryKey(cursor.ID) + } + endKey := makeScheduledTxPrimaryKey(0) + + reader := idx.db.Reader() + iter, err := reader.NewIter(startKey, endKey, storage.DefaultIteratorOptions()) + if err != nil { + return nil, fmt.Errorf("could not create iterator: %w", err) + } + + return iterator.Build(iter, decodeScheduledTxCursor, reconstructScheduledTx), nil +} + +// ByAddress returns an iterator over scheduled transactions for the given account in +// descending ID order. Returns an exhausted iterator and no error if the account has no +// transactions. +// +// `cursor` is a pointer to an [access.ScheduledTransactionCursor]: +// - nil means start from the highest indexed ID +// - non-nil means start at the cursor ID (inclusive) +// +// No error returns are expected during normal operation. +func (idx *ScheduledTransactionsIndex) ByAddress( + account flow.Address, + cursor *access.ScheduledTransactionCursor, +) (storage.ScheduledTransactionIterator, error) { + startKey := makeScheduledTxByAddrKey(account, math.MaxUint64) + if cursor != nil { + startKey = makeScheduledTxByAddrKey(account, cursor.ID) + } + endKey := makeScheduledTxByAddrKey(account, 0) + + reader := idx.db.Reader() + iter, err := reader.NewIter(startKey, endKey, storage.DefaultIteratorOptions()) + if err != nil { + return nil, fmt.Errorf("could not create iterator: %w", err) + } + + // The by-address index is key-only (nil values). The getValue closure performs + // a secondary lookup into the primary index using the decoded cursor's ID. 
+ getValue := func(cur access.ScheduledTransactionCursor, _ []byte, dest *access.ScheduledTransaction) error { + return operation.RetrieveByKey(reader, makeScheduledTxPrimaryKey(cur.ID), dest) + } + + return iterator.Build(iter, decodeScheduledTxByAddrCursor, getValue), nil +} + +// Store indexes new scheduled transactions from the block and advances the latest indexed height. +// Must be called with consecutive block heights. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed. +// +// Expected error returns during normal operation: +// - [storage.ErrAlreadyExists]: if blockHeight is already indexed +func (idx *ScheduledTransactionsIndex) Store( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + blockHeight uint64, + scheduledTxs []access.ScheduledTransaction, +) error { + if err := idx.PrepareStore(lctx, rw, blockHeight); err != nil { + return fmt.Errorf("could not prepare store for block %d: %w", blockHeight, err) + } + + return storeAllScheduledTransactions(rw, scheduledTxs) +} + +// storeAllScheduledTransactions writes all scheduled transaction entries to the batch. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until the batch +// is committed. 
+// +// Expected error returns during normal operation: +// - [storage.ErrAlreadyExists]: if any scheduled transaction ID already exists +func storeAllScheduledTransactions(rw storage.ReaderBatchWriter, scheduledTxs []access.ScheduledTransaction) error { + writer := rw.Writer() + for _, tx := range scheduledTxs { + primaryKey := makeScheduledTxPrimaryKey(tx.ID) + + exists, err := operation.KeyExists(rw.GlobalReader(), primaryKey) + if err != nil { + return fmt.Errorf("could not check key for tx %d: %w", tx.ID, err) + } + if exists { + return fmt.Errorf("scheduled transaction %d already exists: %w", tx.ID, storage.ErrAlreadyExists) + } + + if err := operation.UpsertByKey(writer, primaryKey, tx); err != nil { + return fmt.Errorf("could not store tx %d: %w", tx.ID, err) + } + if err := operation.UpsertByKey(writer, makeScheduledTxByAddrKey(tx.TransactionHandlerOwner, tx.ID), nil); err != nil { + return fmt.Errorf("could not store by-address key for tx %d: %w", tx.ID, err) + } + } + return nil +} + +// Executed updates the transaction's status to Executed and records the ID of the +// transaction that emitted the Executed event. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed. 
+// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if no entry with the given ID exists +// - [storage.ErrInvalidStatusTransition]: if the transaction is already Cancelled, Executed, or Failed +func (idx *ScheduledTransactionsIndex) Executed( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, +) error { + if !lctx.HoldsLock(storage.LockIndexScheduledTransactionsIndex) { + return fmt.Errorf("missing required lock: %s", storage.LockIndexScheduledTransactionsIndex) + } + + key := makeScheduledTxPrimaryKey(scheduledTxID) + var tx access.ScheduledTransaction + if err := operation.RetrieveByKey(rw.GlobalReader(), key, &tx); err != nil { + return fmt.Errorf("could not retrieve scheduled transaction %d: %w", scheduledTxID, err) + } + if tx.Status != access.ScheduledTxStatusScheduled { + return fmt.Errorf("tx %d already in terminal state %s: %w", scheduledTxID, tx.Status, storage.ErrInvalidStatusTransition) + } + + tx.Status = access.ScheduledTxStatusExecuted + tx.ExecutedTransactionID = transactionID + + if err := operation.UpsertByKey(rw.Writer(), key, tx); err != nil { + return fmt.Errorf("could not update scheduled transaction %d: %w", scheduledTxID, err) + } + return nil +} + +// Cancelled updates the transaction's status to Cancelled and records fee amounts +// and the ID of the transaction that emitted the Canceled event. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed. 
+// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if no entry with the given ID exists +// - [storage.ErrInvalidStatusTransition]: if the transaction is already Executed, Cancelled, or Failed +func (idx *ScheduledTransactionsIndex) Cancelled( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + feesReturned uint64, + feesDeducted uint64, + transactionID flow.Identifier, +) error { + if !lctx.HoldsLock(storage.LockIndexScheduledTransactionsIndex) { + return fmt.Errorf("missing required lock: %s", storage.LockIndexScheduledTransactionsIndex) + } + + key := makeScheduledTxPrimaryKey(scheduledTxID) + var tx access.ScheduledTransaction + if err := operation.RetrieveByKey(rw.GlobalReader(), key, &tx); err != nil { + return fmt.Errorf("could not retrieve scheduled transaction %d: %w", scheduledTxID, err) + } + if tx.Status != access.ScheduledTxStatusScheduled { + return fmt.Errorf("tx %d already in terminal state %s: %w", scheduledTxID, tx.Status, storage.ErrInvalidStatusTransition) + } + + tx.Status = access.ScheduledTxStatusCancelled + tx.FeesReturned = feesReturned + tx.FeesDeducted = feesDeducted + tx.CancelledTransactionID = transactionID + + if err := operation.UpsertByKey(rw.Writer(), key, tx); err != nil { + return fmt.Errorf("could not update scheduled transaction %d: %w", scheduledTxID, err) + } + return nil +} + +// Failed updates the transaction's status to Failed and records the ID of the executor +// transaction that attempted (and failed) to execute the scheduled transaction. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed. 
+// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if no entry with the given ID exists +// - [storage.ErrInvalidStatusTransition]: if the transaction is already Executed, Cancelled, or Failed +func (idx *ScheduledTransactionsIndex) Failed( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, +) error { + if !lctx.HoldsLock(storage.LockIndexScheduledTransactionsIndex) { + return fmt.Errorf("missing required lock: %s", storage.LockIndexScheduledTransactionsIndex) + } + + key := makeScheduledTxPrimaryKey(scheduledTxID) + + var tx access.ScheduledTransaction + if err := operation.RetrieveByKey(rw.GlobalReader(), key, &tx); err != nil { + return fmt.Errorf("could not retrieve scheduled transaction %d: %w", scheduledTxID, err) + } + if tx.Status != access.ScheduledTxStatusScheduled { + return fmt.Errorf("tx %d already in terminal state %s: %w", scheduledTxID, tx.Status, storage.ErrInvalidStatusTransition) + } + + tx.Status = access.ScheduledTxStatusFailed + tx.ExecutedTransactionID = transactionID + + if err := operation.UpsertByKey(rw.Writer(), key, tx); err != nil { + return fmt.Errorf("could not update scheduled transaction %d: %w", scheduledTxID, err) + } + return nil +} + +// reconstructScheduledTx decodes a msgpack-encoded value into a [access.ScheduledTransaction]. +// +// Any error indicates a malformed value. +func reconstructScheduledTx(_ access.ScheduledTransactionCursor, value []byte, dest *access.ScheduledTransaction) error { + return msgpack.Unmarshal(value, dest) +} + +// makeScheduledTxPrimaryKey creates a primary key [code][~id]. +// One's complement ensures higher IDs sort first during forward iteration. 
+func makeScheduledTxPrimaryKey(id uint64) []byte { + key := make([]byte, scheduledTxPrimaryKeyLen) + key[0] = codeScheduledTransaction + binary.BigEndian.PutUint64(key[1:], ^id) + return key +} + +// makeScheduledTxByAddrKey creates a by-address key [code][address][~id]. +func makeScheduledTxByAddrKey(addr flow.Address, id uint64) []byte { + key := make([]byte, scheduledTxByAddrKeyLen) + key[0] = codeScheduledTransactionByAddress + copy(key[1:1+flow.AddressLength], addr[:]) + binary.BigEndian.PutUint64(key[1+flow.AddressLength:], ^id) + return key +} + +// decodeScheduledTxCursor decodes a primary key and returns the ID. +// +// Any error indicates a malformed key. +func decodeScheduledTxCursor(key []byte) (access.ScheduledTransactionCursor, error) { + if len(key) != scheduledTxPrimaryKeyLen { + return access.ScheduledTransactionCursor{}, fmt.Errorf("invalid primary key length: expected %d, got %d", scheduledTxPrimaryKeyLen, len(key)) + } + if key[0] != codeScheduledTransaction { + return access.ScheduledTransactionCursor{}, fmt.Errorf("invalid prefix: expected %d, got %d", codeScheduledTransaction, key[0]) + } + return access.ScheduledTransactionCursor{ + ID: ^binary.BigEndian.Uint64(key[1:]), + }, nil +} + +// decodeScheduledTxByAddrCursor decodes a by-address key and returns the cursor ID. +// +// Any error indicates a malformed key. 
func decodeScheduledTxByAddrCursor(key []byte) (access.ScheduledTransactionCursor, error) {
	if len(key) != scheduledTxByAddrKeyLen {
		return access.ScheduledTransactionCursor{}, fmt.Errorf(
			"invalid by-address key length: expected %d, got %d", scheduledTxByAddrKeyLen, len(key))
	}
	if key[0] != codeScheduledTransactionByAddress {
		return access.ScheduledTransactionCursor{}, fmt.Errorf(
			"invalid prefix: expected %d, got %d", codeScheduledTransactionByAddress, key[0])
	}

	// skip prefix and address, then undo the one's complement applied on write
	offset := 1 + flow.AddressLength
	id := ^binary.BigEndian.Uint64(key[offset:])

	return access.ScheduledTransactionCursor{
		ID: id,
	}, nil
}
diff --git a/storage/indexes/scheduled_transactions_bootstrapper.go b/storage/indexes/scheduled_transactions_bootstrapper.go
new file mode 100644
index 00000000000..032675b00fb
--- /dev/null
+++ b/storage/indexes/scheduled_transactions_bootstrapper.go
@@ -0,0 +1,234 @@
package indexes

import (
	"errors"
	"fmt"

	"github.com/jordanschalm/lockctx"
	"go.uber.org/atomic"

	"github.com/onflow/flow-go/model/access"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// ScheduledTransactionsBootstrapper wraps a [ScheduledTransactionsIndex] and performs
// just-in-time initialization of the index when the initial block is provided.
//
// Scheduled transactions may not be available for the root block during bootstrapping.
// This struct acts as a proxy for the underlying [ScheduledTransactionsIndex] and
// encapsulates the complexity of initializing the index when the initial block is eventually provided.
type ScheduledTransactionsBootstrapper struct {
	db                 storage.DB
	initialStartHeight uint64

	// store holds the underlying index once bootstrapped; nil until then.
	// Atomic so concurrent readers observe the swap performed by the first Store.
	store *atomic.Pointer[ScheduledTransactionsIndex]
}

var _ storage.ScheduledTransactionsIndexBootstrapper = (*ScheduledTransactionsBootstrapper)(nil)

// NewScheduledTransactionsBootstrapper creates a new scheduled transactions bootstrapper.
+// +// No error returns are expected during normal operation. +func NewScheduledTransactionsBootstrapper(db storage.DB, initialStartHeight uint64) (*ScheduledTransactionsBootstrapper, error) { + store, err := NewScheduledTransactionsIndex(db) + if err != nil { + if !errors.Is(err, storage.ErrNotBootstrapped) { + return nil, fmt.Errorf("could not create scheduled transactions index: %w", err) + } + // make sure it's nil + store = nil + } + + return &ScheduledTransactionsBootstrapper{ + db: db, + initialStartHeight: initialStartHeight, + store: atomic.NewPointer(store), + }, nil +} + +// FirstIndexedHeight returns the first (oldest) block height that has been indexed. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +func (b *ScheduledTransactionsBootstrapper) FirstIndexedHeight() (uint64, error) { + store := b.store.Load() + if store == nil { + return 0, storage.ErrNotBootstrapped + } + return store.FirstIndexedHeight(), nil +} + +// LatestIndexedHeight returns the latest block height that has been indexed. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +func (b *ScheduledTransactionsBootstrapper) LatestIndexedHeight() (uint64, error) { + store := b.store.Load() + if store == nil { + return 0, storage.ErrNotBootstrapped + } + return store.LatestIndexedHeight(), nil +} + +// UninitializedFirstHeight returns the height the index will accept as the first height, and a boolean +// indicating if the index is initialized. +// If the index is not initialized, the first call to `Store` must include data for this height. 
+func (b *ScheduledTransactionsBootstrapper) UninitializedFirstHeight() (uint64, bool) { + store := b.store.Load() + if store == nil { + return b.initialStartHeight, false + } + return store.FirstIndexedHeight(), true +} + +// ByID returns the scheduled transaction with the given scheduler-assigned ID. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +// - [storage.ErrNotFound] if no scheduled transaction with the given ID exists +func (b *ScheduledTransactionsBootstrapper) ByID(id uint64) (access.ScheduledTransaction, error) { + store := b.store.Load() + if store == nil { + return access.ScheduledTransaction{}, storage.ErrNotBootstrapped + } + return store.ByID(id) +} + +// ByAddress returns an iterator over scheduled transactions for the given account. +// See [ScheduledTransactionsIndex.ByAddress] for full documentation. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +func (b *ScheduledTransactionsBootstrapper) ByAddress( + account flow.Address, + cursor *access.ScheduledTransactionCursor, +) (storage.ScheduledTransactionIterator, error) { + store := b.store.Load() + if store == nil { + return nil, storage.ErrNotBootstrapped + } + return store.ByAddress(account, cursor) +} + +// All returns an iterator over all scheduled transactions. +// See [ScheduledTransactionsIndex.All] for full documentation. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +func (b *ScheduledTransactionsBootstrapper) All( + cursor *access.ScheduledTransactionCursor, +) (storage.ScheduledTransactionIterator, error) { + store := b.store.Load() + if store == nil { + return nil, storage.ErrNotBootstrapped + } + return store.All(cursor) +} + +// Store indexes all new scheduled transactions from the given block. 
// Must be called sequentially with consecutive heights (latestHeight + 1).
// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until the batch is committed.
//
// Expected error returns during normal operations:
// - [storage.ErrNotBootstrapped] if the index has not been initialized and the provided block height
//   is not the initial start height
// - [storage.ErrAlreadyExists] if the block height is already indexed
func (b *ScheduledTransactionsBootstrapper) Store(
	lctx lockctx.Proof,
	rw storage.ReaderBatchWriter,
	blockHeight uint64,
	scheduledTxs []access.ScheduledTransaction,
) error {
	// if the index is already initialized, store the data directly
	if store := b.store.Load(); store != nil {
		return store.Store(lctx, rw, blockHeight, scheduledTxs)
	}

	// otherwise bootstrap the index. this will store the data during initialization
	if blockHeight != b.initialStartHeight {
		return fmt.Errorf("expected first indexed height %d, got %d: %w", b.initialStartHeight, blockHeight, storage.ErrNotBootstrapped)
	}

	store, err := BootstrapScheduledTransactions(lctx, rw, b.db, b.initialStartHeight, scheduledTxs)
	if err != nil {
		return fmt.Errorf("could not initialize scheduled transactions storage: %w", err)
	}

	// NOTE(review): the in-memory pointer is published before the caller commits the
	// batch — if the commit subsequently fails, memory and disk diverge until restart.
	// Presumably callers abort on commit failure; confirm this is acceptable.
	if !b.store.CompareAndSwap(nil, store) {
		// this should never happen. if it does, there is a bug. this indicates another goroutine
		// successfully initialized `store` since we checked the value above. since the bootstrap
		// operation is protected by the lock and it performs sanity checks to ensure the table
		// is actually empty, the bootstrap operation should fail if there was concurrent access.
		return fmt.Errorf("scheduled transactions initialized during bootstrap")
	}

	return nil
}

// Executed updates the scheduled transaction's status to Executed and records the ID of the transaction
// that emitted the Executed event.
+// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until the batch +// is committed. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +// - [storage.ErrNotFound] if no scheduled transaction with the given ID exists +// - [storage.ErrInvalidStatusTransition] if the transaction is already Executed, Cancelled, or Failed +func (b *ScheduledTransactionsBootstrapper) Executed( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, +) error { + store := b.store.Load() + if store == nil { + return storage.ErrNotBootstrapped + } + return store.Executed(lctx, rw, scheduledTxID, transactionID) +} + +// Cancelled updates the scheduled transaction's status to Cancelled and records the +// fee amounts and the ID of the transaction that emitted the Canceled event. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until the batch +// is committed. +// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +// - [storage.ErrNotFound] if no scheduled transaction with the given ID exists +// - [storage.ErrInvalidStatusTransition] if the transaction is already Executed, Cancelled, or Failed +func (b *ScheduledTransactionsBootstrapper) Cancelled( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + feesReturned uint64, + feesDeducted uint64, + transactionID flow.Identifier, +) error { + store := b.store.Load() + if store == nil { + return storage.ErrNotBootstrapped + } + return store.Cancelled(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) +} + +// Failed updates the transaction's status to Failed and records the ID of the transaction +// that emitted the Failed event. +// The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed. 
+// +// Expected error returns during normal operations: +// - [storage.ErrNotBootstrapped] if the index has not been initialized +// - [storage.ErrNotFound] if no scheduled transaction with the given ID exists +// - [storage.ErrInvalidStatusTransition] if the transaction is already Executed, Cancelled, or Failed +func (b *ScheduledTransactionsBootstrapper) Failed( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, +) error { + store := b.store.Load() + if store == nil { + return storage.ErrNotBootstrapped + } + return store.Failed(lctx, rw, scheduledTxID, transactionID) +} diff --git a/storage/indexes/scheduled_transactions_bootstrapper_test.go b/storage/indexes/scheduled_transactions_bootstrapper_test.go new file mode 100644 index 00000000000..f1563172c2b --- /dev/null +++ b/storage/indexes/scheduled_transactions_bootstrapper_test.go @@ -0,0 +1,287 @@ +package indexes_test + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/indexes" + "github.com/onflow/flow-go/storage/indexes/iterator" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +// storeBootstrapperScheduledTx is a helper that calls Store on the bootstrapper under the required lock. 
+func storeBootstrapperScheduledTx( + tb testing.TB, + store storage.ScheduledTransactionsIndexBootstrapper, + db storage.DB, + height uint64, + txs []access.ScheduledTransaction, +) error { + tb.Helper() + lockManager := storage.NewTestingLockManager() + return unittest.WithLock(tb, lockManager, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.Store(lctx, rw, height, txs) + }) + }) +} + +// openPebbleScheduledTxDB opens a pebble DB at dir for use in persistence tests. +func openPebbleScheduledTxDB(tb testing.TB, dir string) storage.DB { + tb.Helper() + pdb, err := pebble.Open(dir, &pebble.Options{}) + require.NoError(tb, err) + return pebbleimpl.ToDB(pdb) +} + +// collectBootstrapperAll collects results from the bootstrapper All method. +func collectBootstrapperAll(tb testing.TB, store storage.ScheduledTransactionsIndexBootstrapper, limit uint32, cursor *access.ScheduledTransactionCursor) ([]access.ScheduledTransaction, *access.ScheduledTransactionCursor) { + tb.Helper() + iter, err := store.All(cursor) + require.NoError(tb, err) + collected, nextCursor, err := iterator.CollectResults(iter, limit, nil) + require.NoError(tb, err) + return collected, nextCursor +} + +// collectBootstrapperByAddress collects results from the bootstrapper ByAddress method. 
func collectBootstrapperByAddress(tb testing.TB, store storage.ScheduledTransactionsIndexBootstrapper, addr access.ScheduledTransaction, limit uint32, cursor *access.ScheduledTransactionCursor) ([]access.ScheduledTransaction, *access.ScheduledTransactionCursor) {
	tb.Helper()
	iter, err := store.ByAddress(addr.TransactionHandlerOwner, cursor)
	require.NoError(tb, err)
	collected, nextCursor, err := iterator.CollectResults(iter, limit, nil)
	require.NoError(tb, err)
	return collected, nextCursor
}

// All read methods must return ErrNotBootstrapped before the first Store.
func TestScheduledTransactionsBootstrapper_Uninitialized_Reads(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 10)
		require.NoError(t, err)

		_, err = store.ByID(1)
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)

		_, err = store.ByAddress(unittest.RandomAddressFixture(), nil)
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)

		_, err = store.All(nil)
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)
	})
}

// The first Store must target exactly the configured initial start height (10);
// both higher (11) and lower (9) heights are rejected.
func TestScheduledTransactionsBootstrapper_FirstStore_WrongHeight(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 10)
		require.NoError(t, err)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 11, nil)
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 9, nil)
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)
	})
}

// The first Store at the initial height bootstraps the index and makes reads available.
func TestScheduledTransactionsBootstrapper_FirstStore_BootstrapsAndSucceeds(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 10)
		require.NoError(t, err)

		addr := unittest.RandomAddressFixture()
		txs := []access.ScheduledTransaction{makeScheduledTx(1, addr)}

		err = storeBootstrapperScheduledTx(t, store, storageDB, 10, txs)
		require.NoError(t, err)

		// reads should work after bootstrap
		got, err := store.ByID(1)
		require.NoError(t, err)
		assert.Equal(t, uint64(1), got.ID)

		byAddr, _ := collectBootstrapperByAddress(t, store, txs[0], 10, nil)
		require.Len(t, byAddr, 1)
		assert.Equal(t, uint64(1), byAddr[0].ID)

		all, _ := collectBootstrapperAll(t, store, 10, nil)
		require.Len(t, all, 1)
	})
}

// After bootstrapping, subsequent consecutive-height Stores go to the underlying index.
func TestScheduledTransactionsBootstrapper_SecondStore_Succeeds(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 5)
		require.NoError(t, err)

		addr := unittest.RandomAddressFixture()
		tx1 := makeScheduledTx(1, addr)
		tx2 := makeScheduledTx(2, addr)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 5, []access.ScheduledTransaction{tx1})
		require.NoError(t, err)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 6, []access.ScheduledTransaction{tx2})
		require.NoError(t, err)

		byAddr, _ := collectBootstrapperByAddress(t, store, tx1, 10, nil)
		require.Len(t, byAddr, 2)

		latest, err := store.LatestIndexedHeight()
		require.NoError(t, err)
		assert.Equal(t, uint64(6), latest)
	})
}

// UninitializedFirstHeight reports the configured start height and flips its
// initialized flag once the first Store succeeds.
func TestScheduledTransactionsBootstrapper_UninitializedFirstHeight(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 42)
		require.NoError(t, err)

		height, initialized := store.UninitializedFirstHeight()
		assert.Equal(t, uint64(42), height)
		assert.False(t, initialized)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 42, nil)
		require.NoError(t, err)

		height, initialized = store.UninitializedFirstHeight()
		assert.Equal(t, uint64(42), height)
		assert.True(t, initialized)
	})
}

// Height accessors return ErrNotBootstrapped (with zero height) before initialization.
func TestScheduledTransactionsBootstrapper_HeightMethods_Uninitialized(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 42)
		require.NoError(t, err)

		height, err := store.FirstIndexedHeight()
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)
		assert.Equal(t, uint64(0), height)

		height, err = store.LatestIndexedHeight()
		require.ErrorIs(t, err, storage.ErrNotBootstrapped)
		assert.Equal(t, uint64(0), height)
	})
}

// After the bootstrapping Store, both height accessors report the start height.
func TestScheduledTransactionsBootstrapper_HeightMethods_Initialized(t *testing.T) {
	t.Parallel()

	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)
		store, err := indexes.NewScheduledTransactionsBootstrapper(storageDB, 7)
		require.NoError(t, err)

		err = storeBootstrapperScheduledTx(t, store, storageDB, 7, nil)
		require.NoError(t, err)

		first, err := store.FirstIndexedHeight()
		require.NoError(t, err)
		assert.Equal(t, uint64(7), first)

		latest, err := store.LatestIndexedHeight()
		require.NoError(t, err)
		assert.Equal(t, uint64(7), latest)
	})
}

// Constructing a bootstrapper over an already-bootstrapped DB picks up the existing index.
func TestScheduledTransactionsBootstrapper_AlreadyBootstrapped(t *testing.T) {
	t.Parallel()

	RunWithBootstrappedScheduledTxIndex(t, 5, func(db storage.DB, _ storage.LockManager, _ *indexes.ScheduledTransactionsIndex) {
		store, err := indexes.NewScheduledTransactionsBootstrapper(db, 5)
		require.NoError(t, err)

		first, err := store.FirstIndexedHeight()
		require.NoError(t, err)
		assert.Equal(t, uint64(5), first)
	})
}

// Bootstrapping the same DB twice must fail with ErrAlreadyExists.
func TestScheduledTransactionsBootstrapper_DoubleBootstrapProtection(t *testing.T) {
	t.Parallel()

	lockManager := storage.NewTestingLockManager()
	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
		storageDB := pebbleimpl.ToDB(db)

		err := unittest.WithLock(t, lockManager, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error {
			return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
				_, bootstrapErr := indexes.BootstrapScheduledTransactions(lctx, rw, storageDB, 1, nil)
				return bootstrapErr
			})
		})
		require.NoError(t, err)

		err = unittest.WithLock(t, lockManager, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error {
			return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
				_, bootstrapErr := indexes.BootstrapScheduledTransactions(lctx, rw, storageDB, 1, nil)
				return bootstrapErr
			})
		})
		require.ErrorIs(t, err, storage.ErrAlreadyExists)
	})
}

// Data written before a restart must be readable by a fresh bootstrapper over the same DB dir.
func TestScheduledTransactionsBootstrapper_PersistenceAcrossRestart(t *testing.T) {
	t.Parallel()

	unittest.RunWithTempDir(t, func(dir string) {
		addr := unittest.RandomAddressFixture()
		tx := makeScheduledTx(99, addr)

		// first "run": bootstrap and write, then close the DB
		func() {
			db := openPebbleScheduledTxDB(t, dir)
			defer db.Close()

			store, err := indexes.NewScheduledTransactionsBootstrapper(db, 100)
			require.NoError(t, err)

			_, err = store.FirstIndexedHeight()
			require.ErrorIs(t, err, storage.ErrNotBootstrapped)

			err = storeBootstrapperScheduledTx(t, store, db, 100, []access.ScheduledTransaction{tx})
			require.NoError(t, err)
		}()

		// second "run": reopen and verify persisted state
		db := openPebbleScheduledTxDB(t, dir)
		defer db.Close()

		store, err := indexes.NewScheduledTransactionsBootstrapper(db, 100)
		require.NoError(t, err)

		first, err := store.FirstIndexedHeight()
		require.NoError(t, err)
		assert.Equal(t, uint64(100), first)

		got, err := store.ByID(99)
		require.NoError(t, err)
		assert.Equal(t, uint64(99), got.ID)
	})
}
diff --git a/storage/indexes/scheduled_transactions_test.go b/storage/indexes/scheduled_transactions_test.go
new file mode 100644
index 00000000000..45babdae7ee
--- /dev/null
+++ b/storage/indexes/scheduled_transactions_test.go
@@ -0,0 +1,390 @@
package indexes_test

import (
	"testing"

	"github.com/cockroachdb/pebble/v2"
	"github.com/jordanschalm/lockctx"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/access"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/indexes"
	"github.com/onflow/flow-go/storage/indexes/iterator"
	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
	"github.com/onflow/flow-go/utils/unittest"
)

// RunWithBootstrappedScheduledTxIndex creates a Pebble DB and bootstraps the scheduled
// transactions index at the given start height. The callback receives the DB, lock manager,
// and the bootstrapped index.
func RunWithBootstrappedScheduledTxIndex(
	tb testing.TB,
	startHeight uint64,
	f func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex),
) {
	unittest.RunWithPebbleDB(tb, func(db *pebble.DB) {
		lm := storage.NewTestingLockManager()
		storageDB := pebbleimpl.ToDB(db)

		// bootstrap with no scheduled transactions; tests store their own via Store
		var idx *indexes.ScheduledTransactionsIndex
		err := unittest.WithLock(tb, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error {
			return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
				var bootstrapErr error
				idx, bootstrapErr = indexes.BootstrapScheduledTransactions(lctx, rw, storageDB, startHeight, nil)
				return bootstrapErr
			})
		})
		require.NoError(tb, err)

		f(storageDB, lm, idx)
	})
}

// storeScheduledTxs is a helper that stores transactions under the required lock.
+func storeScheduledTxs( + tb testing.TB, + lm storage.LockManager, + idx *indexes.ScheduledTransactionsIndex, + db storage.DB, + blockHeight uint64, + txs []access.ScheduledTransaction, +) error { + return unittest.WithLock(tb, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return idx.Store(lctx, rw, blockHeight, txs) + }) + }) +} + +// executeTx is a helper that calls Executed under the required lock. +func executeTx( + tb testing.TB, + lm storage.LockManager, + idx *indexes.ScheduledTransactionsIndex, + db storage.DB, + id uint64, + transactionID flow.Identifier, +) error { + return unittest.WithLock(tb, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return idx.Executed(lctx, rw, id, transactionID) + }) + }) +} + +// cancelTx is a helper that calls Cancelled under the required lock. +func cancelTx( + tb testing.TB, + lm storage.LockManager, + idx *indexes.ScheduledTransactionsIndex, + db storage.DB, + id uint64, + feesReturned uint64, + feesDeducted uint64, + transactionID flow.Identifier, +) error { + return unittest.WithLock(tb, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return idx.Cancelled(lctx, rw, id, feesReturned, feesDeducted, transactionID) + }) + }) +} + +// collectAll is a test helper that collects all results from the index using CollectResults. 
+func collectAll(tb testing.TB, idx *indexes.ScheduledTransactionsIndex, limit uint32, cursor *access.ScheduledTransactionCursor, filter storage.IndexFilter[*access.ScheduledTransaction]) ([]access.ScheduledTransaction, *access.ScheduledTransactionCursor) { + tb.Helper() + iter, err := idx.All(cursor) + require.NoError(tb, err) + collected, nextCursor, err := iterator.CollectResults(iter, limit, filter) + require.NoError(tb, err) + return collected, nextCursor +} + +// collectByAddress is a test helper that collects results for an address using CollectResults. +func collectByAddress(tb testing.TB, idx *indexes.ScheduledTransactionsIndex, addr flow.Address, limit uint32, cursor *access.ScheduledTransactionCursor, filter storage.IndexFilter[*access.ScheduledTransaction]) ([]access.ScheduledTransaction, *access.ScheduledTransactionCursor) { + tb.Helper() + iter, err := idx.ByAddress(addr, cursor) + require.NoError(tb, err) + collected, nextCursor, err := iterator.CollectResults(iter, limit, filter) + require.NoError(tb, err) + return collected, nextCursor +} + +// makeScheduledTx builds a minimal ScheduledTransaction with the given ID and address. 
+func makeScheduledTx(id uint64, addr flow.Address) access.ScheduledTransaction { + return access.ScheduledTransaction{ + ID: id, + Priority: 1, + Timestamp: 1000, + Fees: 500, + TransactionHandlerOwner: addr, + TransactionHandlerTypeIdentifier: "A.0000000000000001.Contract", + TransactionHandlerUUID: 42, + TransactionHandlerPublicPath: "handler", + Status: access.ScheduledTxStatusScheduled, + } +} + +func TestScheduledTransactionsIndex_StoreAndByID(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(100, addr) + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + got, err := idx.ByID(100) + require.NoError(t, err) + assert.Equal(t, tx.ID, got.ID) + assert.Equal(t, tx.Priority, got.Priority) + assert.Equal(t, tx.Timestamp, got.Timestamp) + assert.Equal(t, tx.Fees, got.Fees) + assert.Equal(t, tx.TransactionHandlerOwner, got.TransactionHandlerOwner) + assert.Equal(t, tx.TransactionHandlerTypeIdentifier, got.TransactionHandlerTypeIdentifier) + assert.Equal(t, tx.TransactionHandlerUUID, got.TransactionHandlerUUID) + assert.Equal(t, tx.TransactionHandlerPublicPath, got.TransactionHandlerPublicPath) + assert.Equal(t, access.ScheduledTxStatusScheduled, got.Status) + }) +} + +func TestScheduledTransactionsIndex_StoreDuplicate(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(200, addr) + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + // Storing the same tx ID again always returns ErrAlreadyExists. 
+ err = storeScheduledTxs(t, lm, idx, db, 3, []access.ScheduledTransaction{tx}) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) +} + +func TestScheduledTransactionsIndex_Executed_Happy(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(400, addr) + executedTxID := unittest.IdentifierFixture() + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + err = executeTx(t, lm, idx, db, 400, executedTxID) + require.NoError(t, err) + + got, err := idx.ByID(400) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusExecuted, got.Status) + assert.Equal(t, executedTxID, got.ExecutedTransactionID) + }) +} + +func TestScheduledTransactionsIndex_Executed_NotFound(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + err := executeTx(t, lm, idx, db, 9999, unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +func TestScheduledTransactionsIndex_Executed_AlreadyTerminal(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(500, addr) + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + err = executeTx(t, lm, idx, db, 500, unittest.IdentifierFixture()) + require.NoError(t, err) + + // Second call should fail. 
+ err = executeTx(t, lm, idx, db, 500, unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrInvalidStatusTransition) + }) +} + +func TestScheduledTransactionsIndex_Cancelled_Happy(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(600, addr) + cancelledTxID := unittest.IdentifierFixture() + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + err = cancelTx(t, lm, idx, db, 600, 50, 10, cancelledTxID) + require.NoError(t, err) + + got, err := idx.ByID(600) + require.NoError(t, err) + assert.Equal(t, access.ScheduledTxStatusCancelled, got.Status) + assert.Equal(t, uint64(50), got.FeesReturned) + assert.Equal(t, uint64(10), got.FeesDeducted) + assert.Equal(t, cancelledTxID, got.CancelledTransactionID) + }) +} + +func TestScheduledTransactionsIndex_Cancelled_AlreadyTerminal(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(700, addr) + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + err = cancelTx(t, lm, idx, db, 700, 10, 5, unittest.IdentifierFixture()) + require.NoError(t, err) + + // Second cancellation should fail. 
+ err = cancelTx(t, lm, idx, db, 700, 10, 5, unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrInvalidStatusTransition) + }) +} + +func TestScheduledTransactionsIndex_ExecutedThenCancelled(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + tx := makeScheduledTx(800, addr) + + err := storeScheduledTxs(t, lm, idx, db, 2, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + + err = executeTx(t, lm, idx, db, 800, unittest.IdentifierFixture()) + require.NoError(t, err) + + // Cancelling an executed tx should fail. + err = cancelTx(t, lm, idx, db, 800, 10, 5, unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrInvalidStatusTransition) + }) +} + +func TestScheduledTransactionsIndex_All_Pagination(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + + // Store txs with IDs 1-5, one per block height. + for i := uint64(1); i <= 5; i++ { + tx := makeScheduledTx(i, addr) + err := storeScheduledTxs(t, lm, idx, db, i+1, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + } + + // Page 1: limit=2, expect IDs 5, 4 (highest first). + page1, cursor1 := collectAll(t, idx, 2, nil, nil) + require.Len(t, page1, 2) + require.NotNil(t, cursor1) + assert.Equal(t, uint64(5), page1[0].ID) + assert.Equal(t, uint64(4), page1[1].ID) + + // Page 2: use cursor, expect IDs 3, 2. + page2, cursor2 := collectAll(t, idx, 2, cursor1, nil) + require.Len(t, page2, 2) + require.NotNil(t, cursor2) + assert.Equal(t, uint64(3), page2[0].ID) + assert.Equal(t, uint64(2), page2[1].ID) + + // Page 3: expect only ID 1, no next cursor. 
+ page3, cursor3 := collectAll(t, idx, 2, cursor2, nil) + require.Len(t, page3, 1) + assert.Nil(t, cursor3) + assert.Equal(t, uint64(1), page3[0].ID) + }) +} + +func TestScheduledTransactionsIndex_ByAddress_Pagination(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr1 := unittest.RandomAddressFixture() + addr2 := unittest.RandomAddressFixture() + + // Store 3 txs for addr1 (IDs 1, 2, 3) and 2 txs for addr2 (IDs 4, 5). + blockHeight := uint64(2) + for _, tx := range []access.ScheduledTransaction{ + makeScheduledTx(1, addr1), + makeScheduledTx(2, addr1), + makeScheduledTx(3, addr1), + } { + err := storeScheduledTxs(t, lm, idx, db, blockHeight, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + blockHeight++ + } + for _, tx := range []access.ScheduledTransaction{ + makeScheduledTx(4, addr2), + makeScheduledTx(5, addr2), + } { + err := storeScheduledTxs(t, lm, idx, db, blockHeight, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + blockHeight++ + } + + // First page for addr1: limit=2, expect IDs 3, 2 (highest first). + page1, cursor1 := collectByAddress(t, idx, addr1, 2, nil, nil) + require.Len(t, page1, 2) + require.NotNil(t, cursor1) + assert.Equal(t, uint64(3), page1[0].ID) + assert.Equal(t, uint64(2), page1[1].ID) + + // Second page for addr1: expect ID 1, no next cursor. + page2, cursor2 := collectByAddress(t, idx, addr1, 2, cursor1, nil) + require.Len(t, page2, 1) + assert.Nil(t, cursor2) + assert.Equal(t, uint64(1), page2[0].ID) + + // addr2 is unaffected: limit=10, expect IDs 5, 4. 
+ pageAddr2, _ := collectByAddress(t, idx, addr2, 10, nil, nil) + require.Len(t, pageAddr2, 2) + assert.Equal(t, uint64(5), pageAddr2[0].ID) + assert.Equal(t, uint64(4), pageAddr2[1].ID) + }) +} + +func TestScheduledTransactionsIndex_All_Filter(t *testing.T) { + t.Parallel() + + RunWithBootstrappedScheduledTxIndex(t, 1, func(db storage.DB, lm storage.LockManager, idx *indexes.ScheduledTransactionsIndex) { + addr := unittest.RandomAddressFixture() + + // Store 3 txs: IDs 1, 2, 3. + for i := uint64(1); i <= 3; i++ { + tx := makeScheduledTx(i, addr) + err := storeScheduledTxs(t, lm, idx, db, i+1, []access.ScheduledTransaction{tx}) + require.NoError(t, err) + } + + // Execute tx with ID=2. + err := executeTx(t, lm, idx, db, 2, unittest.IdentifierFixture()) + require.NoError(t, err) + + // Filter to only Executed txs — should return exactly tx with ID=2. + executedOnly := func(tx *access.ScheduledTransaction) bool { + return tx.Status == access.ScheduledTxStatusExecuted + } + collected, _ := collectAll(t, idx, 10, nil, executedOnly) + require.Len(t, collected, 1) + assert.Equal(t, uint64(2), collected[0].ID) + assert.Equal(t, access.ScheduledTxStatusExecuted, collected[0].Status) + }) +} diff --git a/storage/locks.go b/storage/locks.go index dd7eb4b3227..c410ad5c9ba 100644 --- a/storage/locks.go +++ b/storage/locks.go @@ -54,6 +54,9 @@ const ( LockIndexAccountTransactions = "lock_index_account_transactions" LockIndexFungibleTokenTransfers = "lock_index_fungible_token_transfers" LockIndexNonFungibleTokenTransfers = "lock_index_non_fungible_token_transfers" + // LockIndexScheduledTransactionsIndex protects the extended scheduled transactions index. + // This is distinct from LockIndexScheduledTransaction which protects a different lookup. + LockIndexScheduledTransactionsIndex = "lock_index_scheduled_transactions_index" ) // Locks returns a list of all named locks used by the storage layer. 
@@ -83,6 +86,7 @@ func Locks() []string { LockIndexAccountTransactions, LockIndexFungibleTokenTransfers, LockIndexNonFungibleTokenTransfers, + LockIndexScheduledTransactionsIndex, } } @@ -140,6 +144,7 @@ var LockGroupAccessExtendedIndexers = []string{ LockIndexAccountTransactions, LockIndexFungibleTokenTransfers, LockIndexNonFungibleTokenTransfers, + LockIndexScheduledTransactionsIndex, } // addLocks adds a chain of locks to the builder in the order they appear in the locks slice. diff --git a/storage/mock/scheduled_transactions_index.go b/storage/mock/scheduled_transactions_index.go new file mode 100644 index 00000000000..7a41be8ed9d --- /dev/null +++ b/storage/mock/scheduled_transactions_index.go @@ -0,0 +1,606 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// NewScheduledTransactionsIndex creates a new instance of ScheduledTransactionsIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewScheduledTransactionsIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *ScheduledTransactionsIndex { + mock := &ScheduledTransactionsIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ScheduledTransactionsIndex is an autogenerated mock type for the ScheduledTransactionsIndex type +type ScheduledTransactionsIndex struct { + mock.Mock +} + +type ScheduledTransactionsIndex_Expecter struct { + mock *mock.Mock +} + +func (_m *ScheduledTransactionsIndex) EXPECT() *ScheduledTransactionsIndex_Expecter { + return &ScheduledTransactionsIndex_Expecter{mock: &_m.Mock} +} + +// All provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) All(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(cursor) + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 storage.ScheduledTransactionIterator + var r1 error + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(cursor) + } + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(*access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndex_All_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'All' +type ScheduledTransactionsIndex_All_Call struct { + *mock.Call +} + +// All is a helper method to define mock.On call +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndex_Expecter) All(cursor interface{}) 
*ScheduledTransactionsIndex_All_Call { + return &ScheduledTransactionsIndex_All_Call{Call: _e.mock.On("All", cursor)} +} + +func (_c *ScheduledTransactionsIndex_All_Call) Run(run func(cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndex_All_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 *access.ScheduledTransactionCursor + if args[0] != nil { + arg0 = args[0].(*access.ScheduledTransactionCursor) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_All_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndex_All_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndex_All_Call) RunAndReturn(run func(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndex_All_Call { + _c.Call.Return(run) + return _c +} + +// ByAddress provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) ByAddress(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(account, cursor) + + if len(ret) == 0 { + panic("no return value specified for ByAddress") + } + + var r0 storage.ScheduledTransactionIterator + var r1 error + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(account, cursor) + } + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(account, cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(flow.Address, *access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(account, cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// 
ScheduledTransactionsIndex_ByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByAddress' +type ScheduledTransactionsIndex_ByAddress_Call struct { + *mock.Call +} + +// ByAddress is a helper method to define mock.On call +// - account flow.Address +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndex_Expecter) ByAddress(account interface{}, cursor interface{}) *ScheduledTransactionsIndex_ByAddress_Call { + return &ScheduledTransactionsIndex_ByAddress_Call{Call: _e.mock.On("ByAddress", account, cursor)} +} + +func (_c *ScheduledTransactionsIndex_ByAddress_Call) Run(run func(account flow.Address, cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndex_ByAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 flow.Address + if args[0] != nil { + arg0 = args[0].(flow.Address) + } + var arg1 *access.ScheduledTransactionCursor + if args[1] != nil { + arg1 = args[1].(*access.ScheduledTransactionCursor) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_ByAddress_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndex_ByAddress_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndex_ByAddress_Call) RunAndReturn(run func(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndex_ByAddress_Call { + _c.Call.Return(run) + return _c +} + +// ByID provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) ByID(id uint64) (access.ScheduledTransaction, error) { + ret := _mock.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 access.ScheduledTransaction + var r1 error + if returnFunc, ok := ret.Get(0).(func(uint64) (access.ScheduledTransaction, error)); ok { + return returnFunc(id) + } + if 
returnFunc, ok := ret.Get(0).(func(uint64) access.ScheduledTransaction); ok { + r0 = returnFunc(id) + } else { + r0 = ret.Get(0).(access.ScheduledTransaction) + } + if returnFunc, ok := ret.Get(1).(func(uint64) error); ok { + r1 = returnFunc(id) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndex_ByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByID' +type ScheduledTransactionsIndex_ByID_Call struct { + *mock.Call +} + +// ByID is a helper method to define mock.On call +// - id uint64 +func (_e *ScheduledTransactionsIndex_Expecter) ByID(id interface{}) *ScheduledTransactionsIndex_ByID_Call { + return &ScheduledTransactionsIndex_ByID_Call{Call: _e.mock.On("ByID", id)} +} + +func (_c *ScheduledTransactionsIndex_ByID_Call) Run(run func(id uint64)) *ScheduledTransactionsIndex_ByID_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_ByID_Call) Return(scheduledTransaction access.ScheduledTransaction, err error) *ScheduledTransactionsIndex_ByID_Call { + _c.Call.Return(scheduledTransaction, err) + return _c +} + +func (_c *ScheduledTransactionsIndex_ByID_Call) RunAndReturn(run func(id uint64) (access.ScheduledTransaction, error)) *ScheduledTransactionsIndex_ByID_Call { + _c.Call.Return(run) + return _c +} + +// Cancelled provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) Cancelled(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Cancelled") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, 
uint64, uint64, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndex_Cancelled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancelled' +type ScheduledTransactionsIndex_Cancelled_Call struct { + *mock.Call +} + +// Cancelled is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - feesReturned uint64 +// - feesDeducted uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndex_Expecter) Cancelled(lctx interface{}, rw interface{}, scheduledTxID interface{}, feesReturned interface{}, feesDeducted interface{}, transactionID interface{}) *ScheduledTransactionsIndex_Cancelled_Call { + return &ScheduledTransactionsIndex_Cancelled_Call{Call: _e.mock.On("Cancelled", lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID)} +} + +func (_c *ScheduledTransactionsIndex_Cancelled_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndex_Cancelled_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 uint64 + if args[3] != nil { + arg3 = args[3].(uint64) + } + var arg4 uint64 + if args[4] != nil { + arg4 = args[4].(uint64) + } + var arg5 flow.Identifier + if args[5] != nil { + arg5 = args[5].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, + arg5, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_Cancelled_Call) Return(err error) 
*ScheduledTransactionsIndex_Cancelled_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndex_Cancelled_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndex_Cancelled_Call { + _c.Call.Return(run) + return _c +} + +// Executed provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) Executed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Executed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndex_Executed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Executed' +type ScheduledTransactionsIndex_Executed_Call struct { + *mock.Call +} + +// Executed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndex_Expecter) Executed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) *ScheduledTransactionsIndex_Executed_Call { + return &ScheduledTransactionsIndex_Executed_Call{Call: _e.mock.On("Executed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndex_Executed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndex_Executed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 
lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_Executed_Call) Return(err error) *ScheduledTransactionsIndex_Executed_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndex_Executed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndex_Executed_Call { + _c.Call.Return(run) + return _c +} + +// Failed provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) Failed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Failed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndex_Failed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Failed' +type ScheduledTransactionsIndex_Failed_Call struct { + *mock.Call +} + +// Failed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndex_Expecter) Failed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) 
*ScheduledTransactionsIndex_Failed_Call { + return &ScheduledTransactionsIndex_Failed_Call{Call: _e.mock.On("Failed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndex_Failed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndex_Failed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_Failed_Call) Return(err error) *ScheduledTransactionsIndex_Failed_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndex_Failed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndex_Failed_Call { + _c.Call.Return(run) + return _c +} + +// FirstIndexedHeight provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) FirstIndexedHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstIndexedHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// ScheduledTransactionsIndex_FirstIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FirstIndexedHeight' +type ScheduledTransactionsIndex_FirstIndexedHeight_Call struct { + *mock.Call +} + +// FirstIndexedHeight is a helper method to define mock.On call +func (_e 
*ScheduledTransactionsIndex_Expecter) FirstIndexedHeight() *ScheduledTransactionsIndex_FirstIndexedHeight_Call { + return &ScheduledTransactionsIndex_FirstIndexedHeight_Call{Call: _e.mock.On("FirstIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndex_FirstIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndex_FirstIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_FirstIndexedHeight_Call) Return(v uint64) *ScheduledTransactionsIndex_FirstIndexedHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *ScheduledTransactionsIndex_FirstIndexedHeight_Call) RunAndReturn(run func() uint64) *ScheduledTransactionsIndex_FirstIndexedHeight_Call { + _c.Call.Return(run) + return _c +} + +// LatestIndexedHeight provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) LatestIndexedHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestIndexedHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// ScheduledTransactionsIndex_LatestIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestIndexedHeight' +type ScheduledTransactionsIndex_LatestIndexedHeight_Call struct { + *mock.Call +} + +// LatestIndexedHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndex_Expecter) LatestIndexedHeight() *ScheduledTransactionsIndex_LatestIndexedHeight_Call { + return &ScheduledTransactionsIndex_LatestIndexedHeight_Call{Call: _e.mock.On("LatestIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndex_LatestIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndex_LatestIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*ScheduledTransactionsIndex_LatestIndexedHeight_Call) Return(v uint64) *ScheduledTransactionsIndex_LatestIndexedHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *ScheduledTransactionsIndex_LatestIndexedHeight_Call) RunAndReturn(run func() uint64) *ScheduledTransactionsIndex_LatestIndexedHeight_Call { + _c.Call.Return(run) + return _c +} + +// Store provides a mock function for the type ScheduledTransactionsIndex +func (_mock *ScheduledTransactionsIndex) Store(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error { + ret := _mock.Called(lctx, rw, blockHeight, scheduledTxs) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, []access.ScheduledTransaction) error); ok { + r0 = returnFunc(lctx, rw, blockHeight, scheduledTxs) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndex_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' +type ScheduledTransactionsIndex_Store_Call struct { + *mock.Call +} + +// Store is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - blockHeight uint64 +// - scheduledTxs []access.ScheduledTransaction +func (_e *ScheduledTransactionsIndex_Expecter) Store(lctx interface{}, rw interface{}, blockHeight interface{}, scheduledTxs interface{}) *ScheduledTransactionsIndex_Store_Call { + return &ScheduledTransactionsIndex_Store_Call{Call: _e.mock.On("Store", lctx, rw, blockHeight, scheduledTxs)} +} + +func (_c *ScheduledTransactionsIndex_Store_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction)) *ScheduledTransactionsIndex_Store_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = 
args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 []access.ScheduledTransaction + if args[3] != nil { + arg3 = args[3].([]access.ScheduledTransaction) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndex_Store_Call) Return(err error) *ScheduledTransactionsIndex_Store_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndex_Store_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error) *ScheduledTransactionsIndex_Store_Call { + _c.Call.Return(run) + return _c +} diff --git a/storage/mock/scheduled_transactions_index_bootstrapper.go b/storage/mock/scheduled_transactions_index_bootstrapper.go new file mode 100644 index 00000000000..ab2766569a0 --- /dev/null +++ b/storage/mock/scheduled_transactions_index_bootstrapper.go @@ -0,0 +1,677 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// NewScheduledTransactionsIndexBootstrapper creates a new instance of ScheduledTransactionsIndexBootstrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewScheduledTransactionsIndexBootstrapper(t interface { + mock.TestingT + Cleanup(func()) +}) *ScheduledTransactionsIndexBootstrapper { + mock := &ScheduledTransactionsIndexBootstrapper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ScheduledTransactionsIndexBootstrapper is an autogenerated mock type for the ScheduledTransactionsIndexBootstrapper type +type ScheduledTransactionsIndexBootstrapper struct { + mock.Mock +} + +type ScheduledTransactionsIndexBootstrapper_Expecter struct { + mock *mock.Mock +} + +func (_m *ScheduledTransactionsIndexBootstrapper) EXPECT() *ScheduledTransactionsIndexBootstrapper_Expecter { + return &ScheduledTransactionsIndexBootstrapper_Expecter{mock: &_m.Mock} +} + +// All provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) All(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(cursor) + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 storage.ScheduledTransactionIterator + var r1 error + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(cursor) + } + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(*access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_All_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'All' +type ScheduledTransactionsIndexBootstrapper_All_Call struct { + *mock.Call +} + +// All is a helper method to define 
mock.On call +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) All(cursor interface{}) *ScheduledTransactionsIndexBootstrapper_All_Call { + return &ScheduledTransactionsIndexBootstrapper_All_Call{Call: _e.mock.On("All", cursor)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_All_Call) Run(run func(cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndexBootstrapper_All_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 *access.ScheduledTransactionCursor + if args[0] != nil { + arg0 = args[0].(*access.ScheduledTransactionCursor) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_All_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndexBootstrapper_All_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_All_Call) RunAndReturn(run func(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndexBootstrapper_All_Call { + _c.Call.Return(run) + return _c +} + +// ByAddress provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) ByAddress(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(account, cursor) + + if len(ret) == 0 { + panic("no return value specified for ByAddress") + } + + var r0 storage.ScheduledTransactionIterator + var r1 error + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(account, cursor) + } + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(account, cursor) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(flow.Address, *access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(account, cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_ByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByAddress' +type ScheduledTransactionsIndexBootstrapper_ByAddress_Call struct { + *mock.Call +} + +// ByAddress is a helper method to define mock.On call +// - account flow.Address +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) ByAddress(account interface{}, cursor interface{}) *ScheduledTransactionsIndexBootstrapper_ByAddress_Call { + return &ScheduledTransactionsIndexBootstrapper_ByAddress_Call{Call: _e.mock.On("ByAddress", account, cursor)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByAddress_Call) Run(run func(account flow.Address, cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndexBootstrapper_ByAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 flow.Address + if args[0] != nil { + arg0 = args[0].(flow.Address) + } + var arg1 *access.ScheduledTransactionCursor + if args[1] != nil { + arg1 = args[1].(*access.ScheduledTransactionCursor) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByAddress_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndexBootstrapper_ByAddress_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByAddress_Call) RunAndReturn(run func(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndexBootstrapper_ByAddress_Call { + _c.Call.Return(run) + return _c +} + +// ByID provides a mock function for the type 
ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) ByID(id uint64) (access.ScheduledTransaction, error) { + ret := _mock.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 access.ScheduledTransaction + var r1 error + if returnFunc, ok := ret.Get(0).(func(uint64) (access.ScheduledTransaction, error)); ok { + return returnFunc(id) + } + if returnFunc, ok := ret.Get(0).(func(uint64) access.ScheduledTransaction); ok { + r0 = returnFunc(id) + } else { + r0 = ret.Get(0).(access.ScheduledTransaction) + } + if returnFunc, ok := ret.Get(1).(func(uint64) error); ok { + r1 = returnFunc(id) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_ByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByID' +type ScheduledTransactionsIndexBootstrapper_ByID_Call struct { + *mock.Call +} + +// ByID is a helper method to define mock.On call +// - id uint64 +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) ByID(id interface{}) *ScheduledTransactionsIndexBootstrapper_ByID_Call { + return &ScheduledTransactionsIndexBootstrapper_ByID_Call{Call: _e.mock.On("ByID", id)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByID_Call) Run(run func(id uint64)) *ScheduledTransactionsIndexBootstrapper_ByID_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByID_Call) Return(scheduledTransaction access.ScheduledTransaction, err error) *ScheduledTransactionsIndexBootstrapper_ByID_Call { + _c.Call.Return(scheduledTransaction, err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_ByID_Call) RunAndReturn(run func(id uint64) (access.ScheduledTransaction, error)) *ScheduledTransactionsIndexBootstrapper_ByID_Call { + _c.Call.Return(run) + return 
_c +} + +// Cancelled provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) Cancelled(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Cancelled") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, uint64, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexBootstrapper_Cancelled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancelled' +type ScheduledTransactionsIndexBootstrapper_Cancelled_Call struct { + *mock.Call +} + +// Cancelled is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - feesReturned uint64 +// - feesDeducted uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) Cancelled(lctx interface{}, rw interface{}, scheduledTxID interface{}, feesReturned interface{}, feesDeducted interface{}, transactionID interface{}) *ScheduledTransactionsIndexBootstrapper_Cancelled_Call { + return &ScheduledTransactionsIndexBootstrapper_Cancelled_Call{Call: _e.mock.On("Cancelled", lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Cancelled_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexBootstrapper_Cancelled_Call { + _c.Call.Run(func(args 
mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 uint64 + if args[3] != nil { + arg3 = args[3].(uint64) + } + var arg4 uint64 + if args[4] != nil { + arg4 = args[4].(uint64) + } + var arg5 flow.Identifier + if args[5] != nil { + arg5 = args[5].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, + arg5, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Cancelled_Call) Return(err error) *ScheduledTransactionsIndexBootstrapper_Cancelled_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Cancelled_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexBootstrapper_Cancelled_Call { + _c.Call.Return(run) + return _c +} + +// Executed provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) Executed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Executed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexBootstrapper_Executed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Executed' +type ScheduledTransactionsIndexBootstrapper_Executed_Call struct { + *mock.Call +} + +// 
Executed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) Executed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) *ScheduledTransactionsIndexBootstrapper_Executed_Call { + return &ScheduledTransactionsIndexBootstrapper_Executed_Call{Call: _e.mock.On("Executed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Executed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexBootstrapper_Executed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Executed_Call) Return(err error) *ScheduledTransactionsIndexBootstrapper_Executed_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Executed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexBootstrapper_Executed_Call { + _c.Call.Return(run) + return _c +} + +// Failed provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) Failed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, 
transactionID) + + if len(ret) == 0 { + panic("no return value specified for Failed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexBootstrapper_Failed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Failed' +type ScheduledTransactionsIndexBootstrapper_Failed_Call struct { + *mock.Call +} + +// Failed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) Failed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) *ScheduledTransactionsIndexBootstrapper_Failed_Call { + return &ScheduledTransactionsIndexBootstrapper_Failed_Call{Call: _e.mock.On("Failed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Failed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexBootstrapper_Failed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Failed_Call) Return(err error) *ScheduledTransactionsIndexBootstrapper_Failed_Call { + _c.Call.Return(err) + return _c +} + +func (_c 
*ScheduledTransactionsIndexBootstrapper_Failed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexBootstrapper_Failed_Call { + _c.Call.Return(run) + return _c +} + +// FirstIndexedHeight provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) FirstIndexedHeight() (uint64, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstIndexedHeight") + } + + var r0 uint64 + var r1 error + if returnFunc, ok := ret.Get(0).(func() (uint64, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FirstIndexedHeight' +type ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call struct { + *mock.Call +} + +// FirstIndexedHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) FirstIndexedHeight() *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call { + return &ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call{Call: _e.mock.On("FirstIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call) Return(v uint64, err error) *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call { + _c.Call.Return(v, err) + return _c +} + 
+func (_c *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call) RunAndReturn(run func() (uint64, error)) *ScheduledTransactionsIndexBootstrapper_FirstIndexedHeight_Call { + _c.Call.Return(run) + return _c +} + +// LatestIndexedHeight provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) LatestIndexedHeight() (uint64, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestIndexedHeight") + } + + var r0 uint64 + var r1 error + if returnFunc, ok := ret.Get(0).(func() (uint64, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestIndexedHeight' +type ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call struct { + *mock.Call +} + +// LatestIndexedHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) LatestIndexedHeight() *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call { + return &ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call{Call: _e.mock.On("LatestIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call) Return(v uint64, err error) *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c 
*ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call) RunAndReturn(run func() (uint64, error)) *ScheduledTransactionsIndexBootstrapper_LatestIndexedHeight_Call { + _c.Call.Return(run) + return _c +} + +// Store provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) Store(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error { + ret := _mock.Called(lctx, rw, blockHeight, scheduledTxs) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, []access.ScheduledTransaction) error); ok { + r0 = returnFunc(lctx, rw, blockHeight, scheduledTxs) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexBootstrapper_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' +type ScheduledTransactionsIndexBootstrapper_Store_Call struct { + *mock.Call +} + +// Store is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - blockHeight uint64 +// - scheduledTxs []access.ScheduledTransaction +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) Store(lctx interface{}, rw interface{}, blockHeight interface{}, scheduledTxs interface{}) *ScheduledTransactionsIndexBootstrapper_Store_Call { + return &ScheduledTransactionsIndexBootstrapper_Store_Call{Call: _e.mock.On("Store", lctx, rw, blockHeight, scheduledTxs)} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Store_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction)) *ScheduledTransactionsIndexBootstrapper_Store_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var 
arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 []access.ScheduledTransaction + if args[3] != nil { + arg3 = args[3].([]access.ScheduledTransaction) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Store_Call) Return(err error) *ScheduledTransactionsIndexBootstrapper_Store_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_Store_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error) *ScheduledTransactionsIndexBootstrapper_Store_Call { + _c.Call.Return(run) + return _c +} + +// UninitializedFirstHeight provides a mock function for the type ScheduledTransactionsIndexBootstrapper +func (_mock *ScheduledTransactionsIndexBootstrapper) UninitializedFirstHeight() (uint64, bool) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for UninitializedFirstHeight") + } + + var r0 uint64 + var r1 bool + if returnFunc, ok := ret.Get(0).(func() (uint64, bool)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + if returnFunc, ok := ret.Get(1).(func() bool); ok { + r1 = returnFunc() + } else { + r1 = ret.Get(1).(bool) + } + return r0, r1 +} + +// ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UninitializedFirstHeight' +type ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call struct { + *mock.Call +} + +// UninitializedFirstHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndexBootstrapper_Expecter) UninitializedFirstHeight() 
*ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call { + return &ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call{Call: _e.mock.On("UninitializedFirstHeight")} +} + +func (_c *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call) Run(run func()) *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call) Return(v uint64, b bool) *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call { + _c.Call.Return(v, b) + return _c +} + +func (_c *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call) RunAndReturn(run func() (uint64, bool)) *ScheduledTransactionsIndexBootstrapper_UninitializedFirstHeight_Call { + _c.Call.Return(run) + return _c +} diff --git a/storage/mock/scheduled_transactions_index_range_reader.go b/storage/mock/scheduled_transactions_index_range_reader.go new file mode 100644 index 00000000000..537354fe8fb --- /dev/null +++ b/storage/mock/scheduled_transactions_index_range_reader.go @@ -0,0 +1,124 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + mock "github.com/stretchr/testify/mock" +) + +// NewScheduledTransactionsIndexRangeReader creates a new instance of ScheduledTransactionsIndexRangeReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewScheduledTransactionsIndexRangeReader(t interface { + mock.TestingT + Cleanup(func()) +}) *ScheduledTransactionsIndexRangeReader { + mock := &ScheduledTransactionsIndexRangeReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ScheduledTransactionsIndexRangeReader is an autogenerated mock type for the ScheduledTransactionsIndexRangeReader type +type ScheduledTransactionsIndexRangeReader struct { + mock.Mock +} + +type ScheduledTransactionsIndexRangeReader_Expecter struct { + mock *mock.Mock +} + +func (_m *ScheduledTransactionsIndexRangeReader) EXPECT() *ScheduledTransactionsIndexRangeReader_Expecter { + return &ScheduledTransactionsIndexRangeReader_Expecter{mock: &_m.Mock} +} + +// FirstIndexedHeight provides a mock function for the type ScheduledTransactionsIndexRangeReader +func (_mock *ScheduledTransactionsIndexRangeReader) FirstIndexedHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstIndexedHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FirstIndexedHeight' +type ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call struct { + *mock.Call +} + +// FirstIndexedHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndexRangeReader_Expecter) FirstIndexedHeight() *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call { + return &ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call{Call: _e.mock.On("FirstIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + 
}) + return _c +} + +func (_c *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call) Return(v uint64) *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call) RunAndReturn(run func() uint64) *ScheduledTransactionsIndexRangeReader_FirstIndexedHeight_Call { + _c.Call.Return(run) + return _c +} + +// LatestIndexedHeight provides a mock function for the type ScheduledTransactionsIndexRangeReader +func (_mock *ScheduledTransactionsIndexRangeReader) LatestIndexedHeight() uint64 { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestIndexedHeight") + } + + var r0 uint64 + if returnFunc, ok := ret.Get(0).(func() uint64); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(uint64) + } + return r0 +} + +// ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestIndexedHeight' +type ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call struct { + *mock.Call +} + +// LatestIndexedHeight is a helper method to define mock.On call +func (_e *ScheduledTransactionsIndexRangeReader_Expecter) LatestIndexedHeight() *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call { + return &ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call{Call: _e.mock.On("LatestIndexedHeight")} +} + +func (_c *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call) Run(run func()) *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call) Return(v uint64) *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call { + _c.Call.Return(v) + return _c +} + +func (_c *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call) RunAndReturn(run func() 
uint64) *ScheduledTransactionsIndexRangeReader_LatestIndexedHeight_Call { + _c.Call.Return(run) + return _c +} diff --git a/storage/mock/scheduled_transactions_index_reader.go b/storage/mock/scheduled_transactions_index_reader.go new file mode 100644 index 00000000000..a204d1fb572 --- /dev/null +++ b/storage/mock/scheduled_transactions_index_reader.go @@ -0,0 +1,229 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// NewScheduledTransactionsIndexReader creates a new instance of ScheduledTransactionsIndexReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewScheduledTransactionsIndexReader(t interface { + mock.TestingT + Cleanup(func()) +}) *ScheduledTransactionsIndexReader { + mock := &ScheduledTransactionsIndexReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ScheduledTransactionsIndexReader is an autogenerated mock type for the ScheduledTransactionsIndexReader type +type ScheduledTransactionsIndexReader struct { + mock.Mock +} + +type ScheduledTransactionsIndexReader_Expecter struct { + mock *mock.Mock +} + +func (_m *ScheduledTransactionsIndexReader) EXPECT() *ScheduledTransactionsIndexReader_Expecter { + return &ScheduledTransactionsIndexReader_Expecter{mock: &_m.Mock} +} + +// All provides a mock function for the type ScheduledTransactionsIndexReader +func (_mock *ScheduledTransactionsIndexReader) All(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(cursor) + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 storage.ScheduledTransactionIterator 
+ var r1 error + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(cursor) + } + if returnFunc, ok := ret.Get(0).(func(*access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(*access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexReader_All_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'All' +type ScheduledTransactionsIndexReader_All_Call struct { + *mock.Call +} + +// All is a helper method to define mock.On call +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndexReader_Expecter) All(cursor interface{}) *ScheduledTransactionsIndexReader_All_Call { + return &ScheduledTransactionsIndexReader_All_Call{Call: _e.mock.On("All", cursor)} +} + +func (_c *ScheduledTransactionsIndexReader_All_Call) Run(run func(cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndexReader_All_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 *access.ScheduledTransactionCursor + if args[0] != nil { + arg0 = args[0].(*access.ScheduledTransactionCursor) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_All_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndexReader_All_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_All_Call) RunAndReturn(run func(cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndexReader_All_Call { + _c.Call.Return(run) + return _c +} + +// ByAddress provides a mock function for the type 
ScheduledTransactionsIndexReader +func (_mock *ScheduledTransactionsIndexReader) ByAddress(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error) { + ret := _mock.Called(account, cursor) + + if len(ret) == 0 { + panic("no return value specified for ByAddress") + } + + var r0 storage.ScheduledTransactionIterator + var r1 error + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)); ok { + return returnFunc(account, cursor) + } + if returnFunc, ok := ret.Get(0).(func(flow.Address, *access.ScheduledTransactionCursor) storage.ScheduledTransactionIterator); ok { + r0 = returnFunc(account, cursor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ScheduledTransactionIterator) + } + } + if returnFunc, ok := ret.Get(1).(func(flow.Address, *access.ScheduledTransactionCursor) error); ok { + r1 = returnFunc(account, cursor) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexReader_ByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByAddress' +type ScheduledTransactionsIndexReader_ByAddress_Call struct { + *mock.Call +} + +// ByAddress is a helper method to define mock.On call +// - account flow.Address +// - cursor *access.ScheduledTransactionCursor +func (_e *ScheduledTransactionsIndexReader_Expecter) ByAddress(account interface{}, cursor interface{}) *ScheduledTransactionsIndexReader_ByAddress_Call { + return &ScheduledTransactionsIndexReader_ByAddress_Call{Call: _e.mock.On("ByAddress", account, cursor)} +} + +func (_c *ScheduledTransactionsIndexReader_ByAddress_Call) Run(run func(account flow.Address, cursor *access.ScheduledTransactionCursor)) *ScheduledTransactionsIndexReader_ByAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 flow.Address + if args[0] != nil { + arg0 = args[0].(flow.Address) + } + var arg1 
*access.ScheduledTransactionCursor + if args[1] != nil { + arg1 = args[1].(*access.ScheduledTransactionCursor) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_ByAddress_Call) Return(v storage.ScheduledTransactionIterator, err error) *ScheduledTransactionsIndexReader_ByAddress_Call { + _c.Call.Return(v, err) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_ByAddress_Call) RunAndReturn(run func(account flow.Address, cursor *access.ScheduledTransactionCursor) (storage.ScheduledTransactionIterator, error)) *ScheduledTransactionsIndexReader_ByAddress_Call { + _c.Call.Return(run) + return _c +} + +// ByID provides a mock function for the type ScheduledTransactionsIndexReader +func (_mock *ScheduledTransactionsIndexReader) ByID(id uint64) (access.ScheduledTransaction, error) { + ret := _mock.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 access.ScheduledTransaction + var r1 error + if returnFunc, ok := ret.Get(0).(func(uint64) (access.ScheduledTransaction, error)); ok { + return returnFunc(id) + } + if returnFunc, ok := ret.Get(0).(func(uint64) access.ScheduledTransaction); ok { + r0 = returnFunc(id) + } else { + r0 = ret.Get(0).(access.ScheduledTransaction) + } + if returnFunc, ok := ret.Get(1).(func(uint64) error); ok { + r1 = returnFunc(id) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ScheduledTransactionsIndexReader_ByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ByID' +type ScheduledTransactionsIndexReader_ByID_Call struct { + *mock.Call +} + +// ByID is a helper method to define mock.On call +// - id uint64 +func (_e *ScheduledTransactionsIndexReader_Expecter) ByID(id interface{}) *ScheduledTransactionsIndexReader_ByID_Call { + return &ScheduledTransactionsIndexReader_ByID_Call{Call: _e.mock.On("ByID", id)} +} + +func (_c *ScheduledTransactionsIndexReader_ByID_Call) Run(run func(id uint64)) 
*ScheduledTransactionsIndexReader_ByID_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 uint64 + if args[0] != nil { + arg0 = args[0].(uint64) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_ByID_Call) Return(scheduledTransaction access.ScheduledTransaction, err error) *ScheduledTransactionsIndexReader_ByID_Call { + _c.Call.Return(scheduledTransaction, err) + return _c +} + +func (_c *ScheduledTransactionsIndexReader_ByID_Call) RunAndReturn(run func(id uint64) (access.ScheduledTransaction, error)) *ScheduledTransactionsIndexReader_ByID_Call { + _c.Call.Return(run) + return _c +} diff --git a/storage/mock/scheduled_transactions_index_writer.go b/storage/mock/scheduled_transactions_index_writer.go new file mode 100644 index 00000000000..d734543d200 --- /dev/null +++ b/storage/mock/scheduled_transactions_index_writer.go @@ -0,0 +1,328 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// NewScheduledTransactionsIndexWriter creates a new instance of ScheduledTransactionsIndexWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewScheduledTransactionsIndexWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *ScheduledTransactionsIndexWriter { + mock := &ScheduledTransactionsIndexWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ScheduledTransactionsIndexWriter is an autogenerated mock type for the ScheduledTransactionsIndexWriter type +type ScheduledTransactionsIndexWriter struct { + mock.Mock +} + +type ScheduledTransactionsIndexWriter_Expecter struct { + mock *mock.Mock +} + +func (_m *ScheduledTransactionsIndexWriter) EXPECT() *ScheduledTransactionsIndexWriter_Expecter { + return &ScheduledTransactionsIndexWriter_Expecter{mock: &_m.Mock} +} + +// Cancelled provides a mock function for the type ScheduledTransactionsIndexWriter +func (_mock *ScheduledTransactionsIndexWriter) Cancelled(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Cancelled") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, uint64, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexWriter_Cancelled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancelled' +type ScheduledTransactionsIndexWriter_Cancelled_Call struct { + *mock.Call +} + +// Cancelled is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - feesReturned uint64 +// - feesDeducted uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexWriter_Expecter) Cancelled(lctx interface{}, 
rw interface{}, scheduledTxID interface{}, feesReturned interface{}, feesDeducted interface{}, transactionID interface{}) *ScheduledTransactionsIndexWriter_Cancelled_Call { + return &ScheduledTransactionsIndexWriter_Cancelled_Call{Call: _e.mock.On("Cancelled", lctx, rw, scheduledTxID, feesReturned, feesDeducted, transactionID)} +} + +func (_c *ScheduledTransactionsIndexWriter_Cancelled_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexWriter_Cancelled_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 uint64 + if args[3] != nil { + arg3 = args[3].(uint64) + } + var arg4 uint64 + if args[4] != nil { + arg4 = args[4].(uint64) + } + var arg5 flow.Identifier + if args[5] != nil { + arg5 = args[5].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, + arg5, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Cancelled_Call) Return(err error) *ScheduledTransactionsIndexWriter_Cancelled_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Cancelled_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, feesReturned uint64, feesDeducted uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexWriter_Cancelled_Call { + _c.Call.Return(run) + return _c +} + +// Executed provides a mock function for the type ScheduledTransactionsIndexWriter +func (_mock *ScheduledTransactionsIndexWriter) Executed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, 
scheduledTxID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Executed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexWriter_Executed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Executed' +type ScheduledTransactionsIndexWriter_Executed_Call struct { + *mock.Call +} + +// Executed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexWriter_Expecter) Executed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) *ScheduledTransactionsIndexWriter_Executed_Call { + return &ScheduledTransactionsIndexWriter_Executed_Call{Call: _e.mock.On("Executed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndexWriter_Executed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexWriter_Executed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Executed_Call) Return(err error) *ScheduledTransactionsIndexWriter_Executed_Call { + _c.Call.Return(err) + return _c +} + +func (_c 
*ScheduledTransactionsIndexWriter_Executed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexWriter_Executed_Call { + _c.Call.Return(run) + return _c +} + +// Failed provides a mock function for the type ScheduledTransactionsIndexWriter +func (_mock *ScheduledTransactionsIndexWriter) Failed(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error { + ret := _mock.Called(lctx, rw, scheduledTxID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for Failed") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, flow.Identifier) error); ok { + r0 = returnFunc(lctx, rw, scheduledTxID, transactionID) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexWriter_Failed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Failed' +type ScheduledTransactionsIndexWriter_Failed_Call struct { + *mock.Call +} + +// Failed is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - scheduledTxID uint64 +// - transactionID flow.Identifier +func (_e *ScheduledTransactionsIndexWriter_Expecter) Failed(lctx interface{}, rw interface{}, scheduledTxID interface{}, transactionID interface{}) *ScheduledTransactionsIndexWriter_Failed_Call { + return &ScheduledTransactionsIndexWriter_Failed_Call{Call: _e.mock.On("Failed", lctx, rw, scheduledTxID, transactionID)} +} + +func (_c *ScheduledTransactionsIndexWriter_Failed_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier)) *ScheduledTransactionsIndexWriter_Failed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 
storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 flow.Identifier + if args[3] != nil { + arg3 = args[3].(flow.Identifier) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Failed_Call) Return(err error) *ScheduledTransactionsIndexWriter_Failed_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Failed_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, scheduledTxID uint64, transactionID flow.Identifier) error) *ScheduledTransactionsIndexWriter_Failed_Call { + _c.Call.Return(run) + return _c +} + +// Store provides a mock function for the type ScheduledTransactionsIndexWriter +func (_mock *ScheduledTransactionsIndexWriter) Store(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error { + ret := _mock.Called(lctx, rw, blockHeight, scheduledTxs) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, uint64, []access.ScheduledTransaction) error); ok { + r0 = returnFunc(lctx, rw, blockHeight, scheduledTxs) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// ScheduledTransactionsIndexWriter_Store_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Store' +type ScheduledTransactionsIndexWriter_Store_Call struct { + *mock.Call +} + +// Store is a helper method to define mock.On call +// - lctx lockctx.Proof +// - rw storage.ReaderBatchWriter +// - blockHeight uint64 +// - scheduledTxs []access.ScheduledTransaction +func (_e *ScheduledTransactionsIndexWriter_Expecter) Store(lctx interface{}, rw interface{}, blockHeight interface{}, scheduledTxs interface{}) 
*ScheduledTransactionsIndexWriter_Store_Call { + return &ScheduledTransactionsIndexWriter_Store_Call{Call: _e.mock.On("Store", lctx, rw, blockHeight, scheduledTxs)} +} + +func (_c *ScheduledTransactionsIndexWriter_Store_Call) Run(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction)) *ScheduledTransactionsIndexWriter_Store_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 lockctx.Proof + if args[0] != nil { + arg0 = args[0].(lockctx.Proof) + } + var arg1 storage.ReaderBatchWriter + if args[1] != nil { + arg1 = args[1].(storage.ReaderBatchWriter) + } + var arg2 uint64 + if args[2] != nil { + arg2 = args[2].(uint64) + } + var arg3 []access.ScheduledTransaction + if args[3] != nil { + arg3 = args[3].([]access.ScheduledTransaction) + } + run( + arg0, + arg1, + arg2, + arg3, + ) + }) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Store_Call) Return(err error) *ScheduledTransactionsIndexWriter_Store_Call { + _c.Call.Return(err) + return _c +} + +func (_c *ScheduledTransactionsIndexWriter_Store_Call) RunAndReturn(run func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockHeight uint64, scheduledTxs []access.ScheduledTransaction) error) *ScheduledTransactionsIndexWriter_Store_Call { + _c.Call.Return(run) + return _c +} diff --git a/storage/scheduled_transactions_index.go b/storage/scheduled_transactions_index.go new file mode 100644 index 00000000000..0ea8ebe1396 --- /dev/null +++ b/storage/scheduled_transactions_index.go @@ -0,0 +1,157 @@ +package storage + +import ( + "github.com/jordanschalm/lockctx" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +// ScheduledTransactionIterator is an iterator over scheduled transactions ordered by +// descending ID (highest first). 
+type ScheduledTransactionIterator = IndexIterator[accessmodel.ScheduledTransaction, accessmodel.ScheduledTransactionCursor] + +// ScheduledTransactionsIndexReader provides read access to the scheduled transactions index. +// +// All methods are safe for concurrent access. +type ScheduledTransactionsIndexReader interface { + // ByID returns the scheduled transaction with the given scheduler-assigned ID. + // + // Expected error returns during normal operation: + // - [ErrNotFound]: if no scheduled transaction with the given ID exists + ByID(id uint64) (accessmodel.ScheduledTransaction, error) + + // ByAddress returns an iterator over scheduled transactions for the given account, + // ordered by descending ID (highest first). + // Returns an exhausted iterator and no error if the account has no transactions. + // + // `cursor` is a pointer to an [accessmodel.ScheduledTransactionCursor]: + // - nil means start from the highest indexed ID (first page) + // - non-nil means start at the cursor position (inclusive) + // + // Expected error returns during normal operation: + // - [ErrNotBootstrapped]: if the index has not been initialized + ByAddress( + account flow.Address, + cursor *accessmodel.ScheduledTransactionCursor, + ) (ScheduledTransactionIterator, error) + + // All returns an iterator over all scheduled transactions, ordered by descending ID + // (highest first). Returns an exhausted iterator and no error if no transactions exist. + // + // `cursor` is a pointer to an [accessmodel.ScheduledTransactionCursor]: + // - nil means start from the highest indexed ID (first page) + // - non-nil means start at the cursor position (inclusive) + // + // Expected error returns during normal operation: + // - [ErrNotBootstrapped]: if the index has not been initialized + All(cursor *accessmodel.ScheduledTransactionCursor) (ScheduledTransactionIterator, error) +} + +// ScheduledTransactionsIndexRangeReader provides access to the range of indexed heights. 
+// +// All methods are safe for concurrent access. +type ScheduledTransactionsIndexRangeReader interface { + // FirstIndexedHeight returns the first (oldest) block height that has been indexed. + FirstIndexedHeight() uint64 + + // LatestIndexedHeight returns the latest block height that has been indexed. + LatestIndexedHeight() uint64 +} + +// ScheduledTransactionsIndexWriter provides write access to the scheduled transactions index. +// +// NOT CONCURRENCY SAFE. +type ScheduledTransactionsIndexWriter interface { + // Store indexes all new scheduled transactions from the given block and advances + // the latest indexed height to blockHeight. Must be called with consecutive heights. + // The caller must hold the [LockIndexScheduledTransactionsIndex] lock until the batch + // is committed. + // + // Expected error returns during normal operation: + // - [ErrAlreadyExists]: if blockHeight is already indexed + Store( + lctx lockctx.Proof, + rw ReaderBatchWriter, + blockHeight uint64, + scheduledTxs []accessmodel.ScheduledTransaction, + ) error + + // Executed updates the scheduled transaction's status to Executed and records the ID of the + // transaction that emitted the Executed event. + // The caller must hold the [LockIndexScheduledTransactionsIndex] lock until the batch + // is committed. + // + // Expected error returns during normal operation: + // - [ErrNotFound]: if no scheduled transaction with the given ID exists + // - [ErrInvalidStatusTransition]: if the transaction is already Executed, Cancelled, or Failed + Executed( + lctx lockctx.Proof, + rw ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, + ) error + + // Cancelled updates the scheduled transaction's status to Cancelled and records the + // fee amounts and the ID of the transaction that emitted the Canceled event. + // The caller must hold the [LockIndexScheduledTransactionsIndex] lock until the batch + // is committed. 
+ // + // Expected error returns during normal operation: + // - [ErrNotFound]: if no scheduled transaction with the given ID exists + // - [ErrInvalidStatusTransition]: if the transaction is already Executed, Cancelled, or Failed + Cancelled( + lctx lockctx.Proof, + rw ReaderBatchWriter, + scheduledTxID uint64, + feesReturned uint64, + feesDeducted uint64, + transactionID flow.Identifier, + ) error + + // Failed updates the transaction's status to Failed and records the ID of the executor + // transaction that attempted (and failed) to execute the scheduled transaction. + // The caller must hold the [LockIndexScheduledTransactionsIndex] lock until committed. + // + // Expected error returns during normal operation: + // - [ErrNotFound]: if no entry with the given ID exists + // - [ErrInvalidStatusTransition]: if the transaction is already Executed, Cancelled, or Failed + Failed( + lctx lockctx.Proof, + rw ReaderBatchWriter, + scheduledTxID uint64, + transactionID flow.Identifier, + ) error +} + +// ScheduledTransactionsIndex provides full read and write access to the scheduled transactions index. +type ScheduledTransactionsIndex interface { + ScheduledTransactionsIndexReader + ScheduledTransactionsIndexRangeReader + ScheduledTransactionsIndexWriter +} + +// ScheduledTransactionsIndexBootstrapper wraps [ScheduledTransactionsIndex] and performs +// just-in-time initialization of the index when the initial block is provided. +// +// All read and write methods proxy to the underlying index once initialized. +type ScheduledTransactionsIndexBootstrapper interface { + ScheduledTransactionsIndexReader + ScheduledTransactionsIndexWriter + + // FirstIndexedHeight returns the first (oldest) block height that has been indexed. + // + // Expected error returns during normal operation: + // - [ErrNotBootstrapped]: if the index has not been initialized + FirstIndexedHeight() (uint64, error) + + // LatestIndexedHeight returns the latest block height that has been indexed. 
+ // + // Expected error returns during normal operation: + // - [ErrNotBootstrapped]: if the index has not been initialized + LatestIndexedHeight() (uint64, error) + + // UninitializedFirstHeight returns the height the index will accept as the first + // height, and a boolean indicating whether the index is initialized. + UninitializedFirstHeight() (uint64, bool) +} From 78ca0dea48b92af3c5eb0b35f4319a851824de92 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 06:35:03 -0800 Subject: [PATCH 02/18] fix bootstrap order --- .../node_builder/access_node_builder.go | 139 +++++----- cmd/observer/node_builder/observer_builder.go | 240 +++++++++--------- engine/access/rpc/backend/script_executor.go | 24 +- .../rpc/backend/script_executor_test.go | 44 +--- module/execution/scripts.go | 21 +- module/execution/scripts_test.go | 81 ++++-- .../extended/{ => bootstrap}/bootstrap.go | 83 +++++- .../indexer/extended/events/helpers.go | 4 +- .../extended/events/scheduled_transaction.go | 4 +- .../extended/scheduled_transactions_test.go | 2 +- .../indexer/indexer_core.go | 22 -- .../indexer/indexer_core_test.go | 162 ------------ 12 files changed, 360 insertions(+), 466 deletions(-) rename module/state_synchronization/indexer/extended/{ => bootstrap}/bootstrap.go (60%) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index f810044932c..df6a6f6db54 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -95,6 +95,7 @@ import ( "github.com/onflow/flow-go/module/state_synchronization" "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended" + extendedbootstrap "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/bootstrap" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" 
"github.com/onflow/flow-go/network" alspmgr "github.com/onflow/flow-go/network/alsp/manager" @@ -342,7 +343,7 @@ type FlowAccessNodeBuilder struct { ExecutionIndexerCore *indexer.IndexerCore ExtendedIndexer *extended.ExtendedIndexer ExtendedBackend *extendedbackend.Backend - ExtendedStorage extended.Storage + ExtendedStorage extendedbootstrap.Storage CollectionIndexer *collections.Indexer CollectionSyncer *collections.Syncer ScriptExecutor *backend.ScriptExecutor @@ -865,9 +866,29 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess builder.IndexerDependencies.Add(&module.NoopReadyDoneAware{}) } else { var indexedBlockHeightInitializer storage.ConsumerProgressInitializer + + scriptExecutorDependendable := module.NewProxiedReadyDoneAware() extendedIndexerDependable := module.NewProxiedReadyDoneAware() + + // Script executor: + // -> registers storage + scriptExecutorDependencies := cmd.NewDependencyList() + scriptExecutorDependencies.Add(registerStorageDependable) + + // Extended indexer: + // -> script executor + extendedIndexerDependencies := cmd.NewDependencyList() + extendedIndexerDependencies.Add(scriptExecutorDependendable) + + // Regular indexer: + // -> script executor + // -> extended indexer + builder.IndexerDependencies.Add(scriptExecutorDependendable) builder.IndexerDependencies.Add(extendedIndexerDependable) + var indexerDerivedChainData *derived.DerivedChainData + var queryDerivedChainData *derived.DerivedChainData + builder. 
AdminCommand("execute-script", func(config *cmd.NodeConfig) commands.AdminCommand { return stateSyncCommands.NewExecuteScriptCommand(builder.ScriptExecutor) @@ -890,7 +911,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess return nil } - extendedStorage, err := extended.OpenExtendedIndexDB( + extendedStorage, err := extendedbootstrap.OpenExtendedIndexDB( node.Logger, builder.extendedIndexingDBPath, builder.SealedRootBlock.Height, @@ -994,16 +1015,47 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess registerStorageDependable.Init(rda) return rda, nil }, nil). + DependableComponent("script executor", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + var err error + indexerDerivedChainData, queryDerivedChainData, err = builder.buildDerivedChainData() + if err != nil { + return nil, fmt.Errorf("could not create derived chain data: %w", err) + } + + // create script execution module, this depends on the indexer being initialized and the + // having the register storage bootstrapped + scripts := execution.NewScripts( + builder.Logger, + metrics.NewExecutionCollector(builder.Tracer), + builder.RootChainID, + computation.NewProtocolStateWrapper(builder.State), + builder.Storage.Headers, + builder.Storage.RegisterIndex.Get, + builder.scriptExecutorConfig, + queryDerivedChainData, + builder.programCacheSize > 0, + ) + + err = builder.ScriptExecutor.Initialize(builder.Storage.RegisterIndex, scripts, builder.VersionControl) + if err != nil { + return nil, fmt.Errorf("could not initialize script executor: %w", err) + } + + err = builder.RegistersAsyncStore.Initialize(builder.Storage.RegisterIndex) + if err != nil { + return nil, fmt.Errorf("could not initialize registers async store: %w", err) + } + scriptExecutorDependendable.Init(&module.NoopReadyDoneAware{}) + + // the script executor is not a component. 
it is being started as a DependableComponent + // to ensure dependencies are setup in the correct order. + return &module.NoopReadyDoneAware{}, nil + }, scriptExecutorDependencies). DependableComponent("execution data indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // Note: using a DependableComponent here to ensure that the indexer does not block // other components from starting while bootstrapping the register db since it may // take hours to complete. - indexerDerivedChainData, queryDerivedChainData, err := builder.buildDerivedChainData() - if err != nil { - return nil, fmt.Errorf("could not create derived chain data: %w", err) - } - builder.ExecutionIndexerCore = indexer.New( builder.Logger, metrics.NewExecutionStateIndexerCollector(), @@ -1016,7 +1068,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess utils.NotNil(builder.lightTransactionResults), utils.NotNil(builder.scheduledTransactions), builder.RootChainID, - indexerDerivedChainData, + indexerDerivedChainData, // might be nil if program caching is disabled utils.NotNil(builder.CollectionIndexer), utils.NotNil(builder.collectionExecutedMetric), node.StorageLockMgr, @@ -1051,35 +1103,11 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess // setup requester to notify indexer when new execution data is received execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.ExecutionIndexer.OnExecutionData) - // create script execution module, this depends on the indexer being initialized and the - // having the register storage bootstrapped - scripts := execution.NewScripts( - builder.Logger, - metrics.NewExecutionCollector(builder.Tracer), - builder.RootChainID, - computation.NewProtocolStateWrapper(builder.State), - builder.Storage.Headers, - builder.ExecutionIndexerCore.RegisterValue, - builder.scriptExecutorConfig, - queryDerivedChainData, - builder.programCacheSize > 0, - ) - - err = 
builder.ScriptExecutor.Initialize(builder.ExecutionIndexer, scripts, builder.VersionControl) - if err != nil { - return nil, err - } - err = builder.Reporter.Initialize(builder.ExecutionIndexer) if err != nil { return nil, err } - err = builder.RegistersAsyncStore.Initialize(builder.Storage.RegisterIndex) - if err != nil { - return nil, err - } - if builder.stopControlEnabled { builder.StopControl.RegisterHeightRecorder(builder.ExecutionIndexer) } @@ -1091,56 +1119,19 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess extendedIndexerDependable.Init(&module.NoopReadyDoneAware{}) } else { builder.DependableComponent("extended indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - accountTransactions, err := extended.NewAccountTransactions( + extendedIndexer, err := extendedbootstrap.BootstrapIndexers( node.Logger, - builder.ExtendedStorage.AccountTransactionsBootstrapper, node.RootChainID, + utils.NotNil(builder.ExtendedStorage), utils.NotNil(builder.StorageLockMgr), - ) - if err != nil { - return nil, fmt.Errorf("could not create account transactions indexer: %w", err) - } - - ftTransfers := extended.NewFungibleTokenTransfers( - node.Logger, - node.RootChainID, - builder.ExtendedStorage.FungibleTokenTransfersBootstrapper, - ) - - nftTransfers := extended.NewNonFungibleTokenTransfers( - node.Logger, - node.RootChainID, - builder.ExtendedStorage.NonFungibleTokenTransfersBootstrapper, - ) - - scheduledTransactions := extended.NewScheduledTransactions( - node.Logger, - builder.ExtendedStorage.ScheduledTransactionsBootstrapper, - builder.ScriptExecutor, - node.RootChainID, - ) - - extendedIndexers := []extended.Indexer{ - accountTransactions, - ftTransfers, - nftTransfers, - scheduledTransactions, - } - - extendedIndexer, err := extended.NewExtendedIndexer( - node.Logger, - metrics.NewExtendedIndexingCollector(), - builder.ExtendedStorage.DB, - utils.NotNil(builder.StorageLockMgr), - utils.NotNil(builder.State), + 
utils.NotNil(node.State), utils.NotNil(builder.Storage.Index), utils.NotNil(builder.Storage.Headers), utils.NotNil(builder.Storage.Guarantees), utils.NotNil(builder.Storage.Collections), utils.NotNil(builder.events), utils.NotNil(builder.lightTransactionResults), - extendedIndexers, - node.RootChainID, + utils.NotNil(builder.ScriptExecutor), builder.extendedIndexingBackfillDelay, ) if err != nil { @@ -1151,7 +1142,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccess extendedIndexerDependable.Init(builder.ExtendedIndexer) return builder.ExtendedIndexer, nil - }, cmd.NewDependencyList()) + }, extendedIndexerDependencies) } } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 2dee6152f53..2b21a791e19 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -90,6 +90,7 @@ import ( "github.com/onflow/flow-go/module/state_synchronization" "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended" + extendedbootstrap "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/bootstrap" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" consensus_follower "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" @@ -293,7 +294,7 @@ type ObserverServiceBuilder struct { ExecutionIndexerCore *indexer.IndexerCore ExtendedIndexer *extended.ExtendedIndexer ExtendedBackend *extendedbackend.Backend - ExtendedStorage extended.Storage + ExtendedStorage extendedbootstrap.Storage TxResultsIndex *index.TransactionResultsIndex IndexerDependencies *cmd.DependencyList VersionControl *version.VersionControl @@ -1146,9 +1147,6 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS registerStorageDependable := module.NewProxiedReadyDoneAware() 
builder.IndexerDependencies.Add(registerStorageDependable) - extendedIndexerDependable := module.NewProxiedReadyDoneAware() - builder.IndexerDependencies.Add(extendedIndexerDependable) - executionDataPrunerEnabled := builder.executionDataPrunerHeightRangeTarget != 0 builder. @@ -1388,6 +1386,28 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS if builder.executionDataIndexingEnabled { var indexedBlockHeightInitializer storage.ConsumerProgressInitializer + scriptExecutorDependendable := module.NewProxiedReadyDoneAware() + extendedIndexerDependable := module.NewProxiedReadyDoneAware() + + // Script executor: + // -> registers storage + scriptExecutorDependencies := cmd.NewDependencyList() + scriptExecutorDependencies.Add(registerStorageDependable) + + // Extended indexer: + // -> script executor + extendedIndexerDependencies := cmd.NewDependencyList() + extendedIndexerDependencies.Add(scriptExecutorDependendable) + + // Regular indexer: + // -> script executor + // -> extended indexer + builder.IndexerDependencies.Add(scriptExecutorDependendable) + builder.IndexerDependencies.Add(extendedIndexerDependable) + + var indexerDerivedChainData *derived.DerivedChainData + var queryDerivedChainData *derived.DerivedChainData + builder.Module("indexed block height consumer progress", func(node *cmd.NodeConfig) error { // Note: progress is stored in the MAIN db since that is where indexed execution data is stored. 
indexedBlockHeightInitializer = store.NewConsumerProgress(builder.ProtocolDB, module.ConsumeProgressExecutionDataIndexerBlockHeight) @@ -1403,7 +1423,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS return nil } - extendedStorage, err := extended.OpenExtendedIndexDB( + extendedStorage, err := extendedbootstrap.OpenExtendedIndexDB( node.Logger, builder.extendedIndexingDBPath, builder.State.Params().SealedRoot().Height, @@ -1519,83 +1539,13 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS rda := &module.NoopReadyDoneAware{} registerStorageDependable.Init(rda) return rda, nil - }, nil).DependableComponent("execution data indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - // Note: using a DependableComponent here to ensure that the indexer does not block - // other components from starting while bootstrapping the register db since it may - // take hours to complete. - - indexerDerivedChainData, queryDerivedChainData, err := builder.buildDerivedChainData() + }, nil).DependableComponent("script executor", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + var err error + indexerDerivedChainData, queryDerivedChainData, err = builder.buildDerivedChainData() if err != nil { return nil, fmt.Errorf("could not create derived chain data: %w", err) } - var collectionExecutedMetric module.CollectionExecutedMetric = metrics.NewNoopCollector() - collectionIndexer, err := collections.NewIndexer( - builder.Logger, - builder.ProtocolDB, - collectionExecutedMetric, - builder.State, - builder.Storage.Blocks, - builder.Storage.Collections, - builder.lastFullBlockHeight, - builder.StorageLockMgr, - ) - if err != nil { - return nil, fmt.Errorf("could not create collection indexer: %w", err) - } - - builder.ExecutionIndexerCore = indexer.New( - builder.Logger, - metrics.NewExecutionStateIndexerCollector(), - builder.ProtocolDB, - builder.Storage.RegisterIndex, - builder.Storage.Headers, - 
builder.events, - builder.Storage.Collections, - builder.Storage.Transactions, - builder.lightTransactionResults, - builder.scheduledTransactions, - builder.RootChainID, - indexerDerivedChainData, - collectionIndexer, - collectionExecutedMetric, - node.StorageLockMgr, - builder.ExtendedIndexer, - ) - - // start processing from the first height of the registers db, which is initialized from - // the checkpoint. this ensures a consistent starting point for the indexed data. - indexedBlockHeight, err := indexedBlockHeightInitializer.Initialize(builder.Storage.RegisterIndex.FirstHeight()) - if err != nil { - return nil, fmt.Errorf("could not initialize indexed block height: %w", err) - } - - // execution state worker uses a jobqueue to process new execution data and indexes it by using the indexer. - builder.ExecutionIndexer, err = indexer.NewIndexer( - builder.Logger, - builder.Storage.RegisterIndex.FirstHeight(), - builder.Storage.RegisterIndex, - builder.ExecutionIndexerCore, - executionDataStoreCache, - builder.ExecutionDataRequester.HighestConsecutiveHeight, - indexedBlockHeight, - ) - if err != nil { - return nil, err - } - - if executionDataPrunerEnabled { - builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionIndexer) - } - - // setup requester to notify indexer when new execution data is received - execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.ExecutionIndexer.OnExecutionData) - - err = builder.Reporter.Initialize(builder.ExecutionIndexer) - if err != nil { - return nil, err - } - // create script execution module, this depends on the indexer being initialized and the // having the register storage bootstrapped scripts := execution.NewScripts( @@ -1604,83 +1554,123 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS builder.RootChainID, computation.NewProtocolStateWrapper(builder.State), builder.Storage.Headers, - builder.ExecutionIndexerCore.RegisterValue, + builder.Storage.RegisterIndex.Get, 
builder.scriptExecutorConfig, queryDerivedChainData, builder.programCacheSize > 0, ) - err = builder.ScriptExecutor.Initialize(builder.ExecutionIndexer, scripts, builder.VersionControl) + err = builder.ScriptExecutor.Initialize(builder.Storage.RegisterIndex, scripts, builder.VersionControl) if err != nil { - return nil, err + return nil, fmt.Errorf("could not initialize script executor: %w", err) } err = builder.RegistersAsyncStore.Initialize(builder.Storage.RegisterIndex) if err != nil { - return nil, err + return nil, fmt.Errorf("could not initialize registers async store: %w", err) } + scriptExecutorDependendable.Init(&module.NoopReadyDoneAware{}) + + // the script executor is not a component. it is being started as a DependableComponent + // to ensure dependencies are setup in the correct order. + return &module.NoopReadyDoneAware{}, nil + }, scriptExecutorDependencies). + DependableComponent("execution data indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // Note: using a DependableComponent here to ensure that the indexer does not block + // other components from starting while bootstrapping the register db since it may + // take hours to complete. 
+ + var collectionExecutedMetric module.CollectionExecutedMetric = metrics.NewNoopCollector() + collectionIndexer, err := collections.NewIndexer( + builder.Logger, + builder.ProtocolDB, + collectionExecutedMetric, + builder.State, + builder.Storage.Blocks, + builder.Storage.Collections, + builder.lastFullBlockHeight, + builder.StorageLockMgr, + ) + if err != nil { + return nil, fmt.Errorf("could not create collection indexer: %w", err) + } - if builder.stopControlEnabled { - builder.StopControl.RegisterHeightRecorder(builder.ExecutionIndexer) - } + builder.ExecutionIndexerCore = indexer.New( + builder.Logger, + metrics.NewExecutionStateIndexerCollector(), + builder.ProtocolDB, + builder.Storage.RegisterIndex, + builder.Storage.Headers, + builder.events, + builder.Storage.Collections, + builder.Storage.Transactions, + builder.lightTransactionResults, + builder.scheduledTransactions, + builder.RootChainID, + indexerDerivedChainData, // might be nil if program caching is disabled + collectionIndexer, + collectionExecutedMetric, + node.StorageLockMgr, + builder.ExtendedIndexer, + ) - return builder.ExecutionIndexer, nil - }, builder.IndexerDependencies) + // start processing from the first height of the registers db, which is initialized from + // the checkpoint. this ensures a consistent starting point for the indexed data. 
+ indexedBlockHeight, err := indexedBlockHeightInitializer.Initialize(builder.Storage.RegisterIndex.FirstHeight()) + if err != nil { + return nil, fmt.Errorf("could not initialize indexed block height: %w", err) + } - if !builder.extendedIndexingEnabled { - extendedIndexerDependable.Init(&module.NoopReadyDoneAware{}) - } else { - builder.DependableComponent("extended indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - accountTransactions, err := extended.NewAccountTransactions( - node.Logger, - builder.ExtendedStorage.AccountTransactionsBootstrapper, - node.RootChainID, - utils.NotNil(builder.StorageLockMgr), + // execution state worker uses a jobqueue to process new execution data and indexes it by using the indexer. + builder.ExecutionIndexer, err = indexer.NewIndexer( + builder.Logger, + builder.Storage.RegisterIndex.FirstHeight(), + builder.Storage.RegisterIndex, + builder.ExecutionIndexerCore, + executionDataStoreCache, + builder.ExecutionDataRequester.HighestConsecutiveHeight, + indexedBlockHeight, ) if err != nil { - return nil, fmt.Errorf("could not create account transactions indexer: %w", err) + return nil, err } - ftTransfers := extended.NewFungibleTokenTransfers( - node.Logger, - node.RootChainID, - builder.ExtendedStorage.FungibleTokenTransfersBootstrapper, - ) + if executionDataPrunerEnabled { + builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionIndexer) + } - nftTransfers := extended.NewNonFungibleTokenTransfers( - node.Logger, - node.RootChainID, - builder.ExtendedStorage.NonFungibleTokenTransfersBootstrapper, - ) + // setup requester to notify indexer when new execution data is received + execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.ExecutionIndexer.OnExecutionData) - scheduledTransactions := extended.NewScheduledTransactions( - node.Logger, - builder.ExtendedStorage.ScheduledTransactionsBootstrapper, - builder.ScriptExecutor, - node.RootChainID, - ) + err = 
builder.Reporter.Initialize(builder.ExecutionIndexer) + if err != nil { + return nil, err + } - extendedIndexers := []extended.Indexer{ - accountTransactions, - ftTransfers, - nftTransfers, - scheduledTransactions, + if builder.stopControlEnabled { + builder.StopControl.RegisterHeightRecorder(builder.ExecutionIndexer) } - extendedIndexer, err := extended.NewExtendedIndexer( + return builder.ExecutionIndexer, nil + }, builder.IndexerDependencies) + + if !builder.extendedIndexingEnabled { + extendedIndexerDependable.Init(&module.NoopReadyDoneAware{}) + } else { + builder.DependableComponent("extended indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + extendedIndexer, err := extendedbootstrap.BootstrapIndexers( node.Logger, - metrics.NewExtendedIndexingCollector(), - builder.ExtendedStorage.DB, + node.RootChainID, + utils.NotNil(builder.ExtendedStorage), utils.NotNil(builder.StorageLockMgr), - utils.NotNil(builder.State), + utils.NotNil(node.State), utils.NotNil(builder.Storage.Index), utils.NotNil(builder.Storage.Headers), utils.NotNil(builder.Storage.Guarantees), utils.NotNil(builder.Storage.Collections), utils.NotNil(builder.events), utils.NotNil(builder.lightTransactionResults), - extendedIndexers, - node.RootChainID, + utils.NotNil(builder.ScriptExecutor), builder.extendedIndexingBackfillDelay, ) if err != nil { @@ -1691,7 +1681,7 @@ func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverS extendedIndexerDependable.Init(builder.ExtendedIndexer) return builder.ExtendedIndexer, nil - }, cmd.NewDependencyList()) + }, extendedIndexerDependencies) } } diff --git a/engine/access/rpc/backend/script_executor.go b/engine/access/rpc/backend/script_executor.go index fc4ea418e51..c6a5912a12d 100644 --- a/engine/access/rpc/backend/script_executor.go +++ b/engine/access/rpc/backend/script_executor.go @@ -11,10 +11,15 @@ import ( "github.com/onflow/flow-go/engine/common/version" "github.com/onflow/flow-go/model/flow" 
"github.com/onflow/flow-go/module/execution" - "github.com/onflow/flow-go/module/state_synchronization" "github.com/onflow/flow-go/storage" ) +// indexReporter provides information about the data available in the registers database +type indexReporter interface { + LatestHeight() uint64 + FirstHeight() uint64 +} + // ErrIncompatibleNodeVersion indicates that node version is incompatible with the block version var ErrIncompatibleNodeVersion = errors.New("node version is incompatible with data for block") @@ -25,7 +30,7 @@ type ScriptExecutor struct { scriptExecutor *execution.Scripts // indexReporter provides information about the current state of the execution state indexer. - indexReporter state_synchronization.IndexReporter + indexReporter indexReporter // versionControl provides information about the current version beacon for each block versionControl *version.VersionControl @@ -73,7 +78,7 @@ func (s *ScriptExecutor) SetMaxCompatibleHeight(height uint64) { // This method can be called at any time after the ScriptExecutor object is created. Any requests // made to the other methods will return storage.ErrHeightNotIndexed until this method is called. 
func (s *ScriptExecutor) Initialize( - indexReporter state_synchronization.IndexReporter, + indexReporter indexReporter, scriptExecutor *execution.Scripts, versionControl *version.VersionControl, ) error { @@ -196,20 +201,11 @@ func (s *ScriptExecutor) checkHeight(height uint64) error { return fmt.Errorf("%w: script executor not initialized", storage.ErrHeightNotIndexed) } - highestHeight, err := s.indexReporter.HighestIndexedHeight() - if err != nil { - return fmt.Errorf("could not get highest indexed height: %w", err) - } - if height > highestHeight { + if height > s.indexReporter.LatestHeight() { return fmt.Errorf("%w: block not indexed yet", storage.ErrHeightNotIndexed) } - lowestHeight, err := s.indexReporter.LowestIndexedHeight() - if err != nil { - return fmt.Errorf("could not get lowest indexed height: %w", err) - } - - if height < lowestHeight { + if height < s.indexReporter.FirstHeight() { return fmt.Errorf("%w: block is before lowest indexed height", storage.ErrHeightNotIndexed) } diff --git a/engine/access/rpc/backend/script_executor_test.go b/engine/access/rpc/backend/script_executor_test.go index 2156a82ebb9..321283765cb 100644 --- a/engine/access/rpc/backend/script_executor_test.go +++ b/engine/access/rpc/backend/script_executor_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/engine/access/index" "github.com/onflow/flow-go/engine/common/version" "github.com/onflow/flow-go/engine/execution/computation/query" "github.com/onflow/flow-go/engine/execution/testutil" @@ -20,12 +19,9 @@ import ( "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/execution" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/state_synchronization/indexer" - 
syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" "github.com/onflow/flow-go/storage" storageMock "github.com/onflow/flow-go/storage/mock" @@ -42,8 +38,6 @@ type ScriptExecutorSuite struct { log zerolog.Logger registerIndex storage.RegisterIndex versionControl *version.VersionControl - reporter *syncmock.IndexReporter - indexReporter *index.Reporter scripts *execution.Scripts chain flow.Chain dbDir string @@ -97,15 +91,9 @@ func (s *ScriptExecutorSuite) bootstrap() { // SetupTest sets up the test environment for each test in the suite. // This includes initializing various components and mock objects needed for the tests. func (s *ScriptExecutorSuite) SetupTest() { - lockManager := storage.NewTestingLockManager() s.log = unittest.Logger() s.chain = flow.Emulator.Chain() - s.reporter = syncmock.NewIndexReporter(s.T()) - s.indexReporter = index.NewReporter() - err := s.indexReporter.Initialize(s.reporter) - require.NoError(s.T(), err) - blockchain := unittest.BlockchainFixture(10) s.headers = newBlockHeadersStorage(blockchain) s.height = blockchain[0].Height @@ -129,32 +117,13 @@ func (s *ScriptExecutorSuite) SetupTest() { derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) s.Require().NoError(err) - indexerCore := indexer.New( - s.log, - module.ExecutionStateIndexerMetrics(metrics.NewNoopCollector()), - nil, - s.registerIndex, - s.headers, - nil, - nil, - nil, - nil, - nil, - s.chain.ChainID(), - derivedChainData, - nil, - metrics.NewNoopCollector(), - lockManager, - nil, // accountTxIndex - ) - s.scripts = execution.NewScripts( s.log, metrics.NewNoopCollector(), s.chain.ChainID(), protocolState, s.headers, - indexerCore.RegisterValue, + s.registerIndex.Get, query.NewDefaultConfig(), derivedChainData, true, @@ -177,16 +146,13 @@ func (s *ScriptExecutorSuite) TestExecuteAtBlockHeight() { var scriptArgs [][]byte var 
expectedResult = []byte("{\"type\":\"Void\"}\n") - s.reporter.On("LowestIndexedHeight").Return(s.height, nil) - // This test simulates the behavior when the version beacon is not set in the script executor, // but it should still work by omitting the version control checks. s.Run("test script execution without version control", func() { scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) - s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil).Once() // Initialize the script executor without version control - err := scriptExec.Initialize(s.indexReporter, s.scripts, nil) + err := scriptExec.Initialize(s.registerIndex, s.scripts, nil) s.Require().NoError(err) // Execute the script at the specified block height @@ -236,9 +202,8 @@ func (s *ScriptExecutorSuite) TestExecuteAtBlockHeight() { // Initialize the script executor with version control scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) - s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil) - err = scriptExec.Initialize(s.indexReporter, s.scripts, s.versionControl) + err = scriptExec.Initialize(s.registerIndex, s.scripts, s.versionControl) s.Require().NoError(err) // Execute the script at the specified block height @@ -288,9 +253,8 @@ func (s *ScriptExecutorSuite) TestExecuteAtBlockHeight() { // Initialize the script executor with version control scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) - s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil) - err = scriptExec.Initialize(s.indexReporter, s.scripts, s.versionControl) + err = scriptExec.Initialize(s.registerIndex, s.scripts, s.versionControl) s.Require().NoError(err) // Execute the script at the specified block height diff --git a/module/execution/scripts.go b/module/execution/scripts.go index 7eeb0ced5a2..43cbc2e364c 100644 --- a/module/execution/scripts.go +++ b/module/execution/scripts.go @@ -2,6 +2,7 @@ package execution import ( "context" + "errors" "github.com/rs/zerolog" @@ -202,6 
+203,15 @@ func (s *Scripts) GetAccountKey(ctx context.Context, address flow.Address, keyIn return s.executor.GetAccountKey(ctx, address, keyIndex, header, snap) } +// RegisterValue retrieves register values by the register IDs at the provided block height. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if the register is not found at the given height. +// - [storage.ErrHeightNotIndexed]: if the given height was not indexed yet or lower than the first indexed height. +func (s *Scripts) RegisterValue(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + return s.registerAtHeight(ID, height) +} + // snapshotWithBlock is a common function for executing scripts and get account functionality. // It creates a storage snapshot that is needed by the FVM to execute scripts. func (s *Scripts) snapshotWithBlock(height uint64) (snapshot.StorageSnapshot, *flow.Header, error) { @@ -211,7 +221,16 @@ func (s *Scripts) snapshotWithBlock(height uint64) (snapshot.StorageSnapshot, *f } storageSnapshot := snapshot.NewReadFuncStorageSnapshot(func(ID flow.RegisterID) (flow.RegisterValue, error) { - return s.registerAtHeight(ID, height) + value, err := s.registerAtHeight(ID, height) + if err != nil { + // the storage snapshot consumer expects the snapshot to return nil if the register is not found + // instead of an error. 
+ if errors.Is(err, storage.ErrNotFound) { + return nil, nil + } + return nil, err + } + return value, nil }) return storageSnapshot, header, nil diff --git a/module/execution/scripts_test.go b/module/execution/scripts_test.go index 1532e8b2bc0..3cb832e119d 100644 --- a/module/execution/scripts_test.go +++ b/module/execution/scripts_test.go @@ -24,7 +24,6 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/state_synchronization/indexer" synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" "github.com/onflow/flow-go/storage" pebbleStorage "github.com/onflow/flow-go/storage/pebble" @@ -153,8 +152,65 @@ func (s *scriptTestSuite) TestGetAccountKeys() { } +func (s *scriptTestSuite) TestRegisterValue() { + regAddress := unittest.RandomAddressFixture() + registerID := flow.NewRegisterID(regAddress, "test_key") + value := flow.RegisterValue([]byte("test_value")) + + s.Run("returns stored register value", func() { + s.height++ + err := s.registerIndex.Store(flow.RegisterEntries{{Key: registerID, Value: value}}, s.height) + s.Require().NoError(err) + + result, err := s.scripts.RegisterValue(registerID, s.height) + s.Require().NoError(err) + s.Assert().Equal(value, result) + }) + + s.Run("returns ErrNotFound for missing register at indexed height", func() { + missingID := flow.NewRegisterID(unittest.RandomAddressFixture(), "missing_key") + result, err := s.scripts.RegisterValue(missingID, s.height) + s.Assert().ErrorIs(err, storage.ErrNotFound) + s.Assert().Nil(result) + }) + + s.Run("returns most recently indexed value when queried at later height", func() { + lateRegID := flow.NewRegisterID(unittest.RandomAddressFixture(), "late_key") + v1 := flow.RegisterValue([]byte("v1")) + v2 := flow.RegisterValue([]byte("v2")) + + h1 := s.height + 1 + err := s.registerIndex.Store(flow.RegisterEntries{{Key: lateRegID, Value: v1}}, 
h1) + s.Require().NoError(err) + + // advance height without storing lateRegID + h2 := h1 + 1 + err = s.registerIndex.Store(flow.RegisterEntries{}, h2) + s.Require().NoError(err) + + // querying at h2 returns v1 (stored at h1) + result, err := s.scripts.RegisterValue(lateRegID, h2) + s.Require().NoError(err) + s.Assert().Equal(v1, result) + + // store v2 at h3 + h3 := h2 + 1 + err = s.registerIndex.Store(flow.RegisterEntries{{Key: lateRegID, Value: v2}}, h3) + s.Require().NoError(err) + s.height = h3 + + result, err = s.scripts.RegisterValue(lateRegID, h3) + s.Require().NoError(err) + s.Assert().Equal(v2, result) + + // querying at h2 still returns v1 + result, err = s.scripts.RegisterValue(lateRegID, h2) + s.Require().NoError(err) + s.Assert().Equal(v1, result) + }) +} + func (s *scriptTestSuite) SetupTest() { - lockManager := storage.NewTestingLockManager() logger := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) entropyProvider := testutil.ProtocolStateWithSourceFixture(nil) blockchain := unittest.BlockchainFixture(10) @@ -179,32 +235,13 @@ func (s *scriptTestSuite) SetupTest() { derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) s.Require().NoError(err) - index := indexer.New( - logger, - metrics.NewNoopCollector(), - nil, - s.registerIndex, - headers, - nil, - nil, - nil, - nil, - nil, - flow.Testnet, - derivedChainData, - nil, - nil, - lockManager, - nil, // accountTxIndex - ) - s.scripts = NewScripts( logger, metrics.NewNoopCollector(), s.chain.ChainID(), entropyProvider, headers, - index.RegisterValue, + s.registerIndex.Get, query.NewDefaultConfig(), derivedChainData, true, diff --git a/module/state_synchronization/indexer/extended/bootstrap.go b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go similarity index 60% rename from module/state_synchronization/indexer/extended/bootstrap.go rename to module/state_synchronization/indexer/extended/bootstrap/bootstrap.go index 
b23231daa61..774bf1a8932 100644 --- a/module/state_synchronization/indexer/extended/bootstrap.go +++ b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go @@ -1,14 +1,21 @@ -package extended +package bootstrap import ( "fmt" + "time" "github.com/rs/zerolog" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/state_synchronization/indexer/extended" + "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/indexes" "github.com/onflow/flow-go/storage/operation/pebbleimpl" pstorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/utils" ) type Storage struct { @@ -79,3 +86,77 @@ func OpenExtendedIndexDB( ScheduledTransactionsBootstrapper: scheduledTxStore, }, nil } + +func BootstrapIndexers( + log zerolog.Logger, + chainID flow.ChainID, + extendedStorage Storage, + lockManager storage.LockManager, + state protocol.State, + index storage.Index, + headers storage.Headers, + guarantees storage.Guarantees, + collections storage.Collections, + events storage.Events, + results storage.LightTransactionResults, + scriptExecutor execution.ScriptExecutor, + backfillDelay time.Duration, +) (*extended.ExtendedIndexer, error) { + accountTransactions, err := extended.NewAccountTransactions( + log, + extendedStorage.AccountTransactionsBootstrapper, + chainID, + utils.NotNil(lockManager), + ) + if err != nil { + return nil, fmt.Errorf("could not create account transactions indexer: %w", err) + } + + ftTransfers := extended.NewFungibleTokenTransfers( + log, + chainID, + extendedStorage.FungibleTokenTransfersBootstrapper, + ) + + nftTransfers := extended.NewNonFungibleTokenTransfers( + log, + chainID, + extendedStorage.NonFungibleTokenTransfersBootstrapper, + ) + + scheduledTransactions := extended.NewScheduledTransactions( + log, + 
extendedStorage.ScheduledTransactionsBootstrapper, + scriptExecutor, + chainID, + ) + + extendedIndexers := []extended.Indexer{ + accountTransactions, + ftTransfers, + nftTransfers, + scheduledTransactions, + } + + extendedIndexer, err := extended.NewExtendedIndexer( + log, + metrics.NewExtendedIndexingCollector(), + extendedStorage.DB, + lockManager, + state, + index, + headers, + guarantees, + collections, + events, + results, + extendedIndexers, + chainID, + backfillDelay, + ) + if err != nil { + return nil, fmt.Errorf("could not create extended indexer: %w", err) + } + + return extendedIndexer, nil +} diff --git a/module/state_synchronization/indexer/extended/events/helpers.go b/module/state_synchronization/indexer/extended/events/helpers.go index f67f488da1d..801a2207cfc 100644 --- a/module/state_synchronization/indexer/extended/events/helpers.go +++ b/module/state_synchronization/indexer/extended/events/helpers.go @@ -45,8 +45,8 @@ func AddressFromOptional(opt cadence.Optional) (flow.Address, error) { return flow.BytesToAddress(addr.Bytes()), nil } -// PathFromOptional extracts a path string ("domain/identifier") from a [cadence.Optional] -// containing a [cadence.Path]. Returns "" if the optional is empty. +// PathFromOptional extracts a path string from a [cadence.Optional] containing a [cadence.Path]. +// Returns the path string (e.g "/public/identifier"), or "" if the optional is empty. // // Any error indicates that the optional value is not a valid path. 
func PathFromOptional(opt cadence.Optional) (string, error) { diff --git a/module/state_synchronization/indexer/extended/events/scheduled_transaction.go b/module/state_synchronization/indexer/extended/events/scheduled_transaction.go index a67e0f224f6..428eddd6d45 100644 --- a/module/state_synchronization/indexer/extended/events/scheduled_transaction.go +++ b/module/state_synchronization/indexer/extended/events/scheduled_transaction.go @@ -19,7 +19,7 @@ type TransactionSchedulerScheduledEvent struct { TransactionHandlerOwner flow.Address TransactionHandlerTypeIdentifier string TransactionHandlerUUID uint64 - TransactionHandlerPublicPath string // "domain/identifier", or "" if absent + TransactionHandlerPublicPath string } // TransactionSchedulerPendingExecutionEvent represents a decoded FlowTransactionScheduler.PendingExecution event, @@ -42,7 +42,7 @@ type TransactionSchedulerExecutedEvent struct { TransactionHandlerOwner flow.Address TransactionHandlerTypeIdentifier string TransactionHandlerUUID uint64 - TransactionHandlerPublicPath string // "domain/identifier", or "" if absent + TransactionHandlerPublicPath string } // TransactionSchedulerCanceledEvent represents a decoded FlowTransactionScheduler.Canceled event, diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go index e1d67be2c65..5cafa8c5687 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go @@ -120,7 +120,7 @@ func TestScheduledTransactionsIndexer_ScheduledEventPublicPath(t *testing.T) { tx, err := store.ByID(1) require.NoError(t, err) - assert.Equal(t, "public/handlerCapability", tx.TransactionHandlerPublicPath) + assert.Equal(t, "/public/handlerCapability", tx.TransactionHandlerPublicPath) } // TestScheduledTransactionsIndexer_ExecutedWithPending verifies that a tx 
scheduled at height 1 diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go index 79678ac1f15..3d0d1816d77 100644 --- a/module/state_synchronization/indexer/indexer_core.go +++ b/module/state_synchronization/indexer/indexer_core.go @@ -106,28 +106,6 @@ func New( } } -// RegisterValue retrieves register values by the register IDs at the provided block height. -// Even if the register wasn't indexed at the provided height, returns the highest height the register was indexed at. -// If a register is not found it will return a nil value and not an error. -// -// Expected error returns during normal operation: -// - [storage.ErrHeightNotIndexed]: if the given height was not indexed yet or lower than the first indexed height. -func (c *IndexerCore) RegisterValue(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { - value, err := c.registers.Get(ID, height) - if err != nil { - // only return an error if the error doesn't match the not found error, since we have - // to gracefully handle not found values and instead assign nil, that is because the script executor - // expects that behaviour - if errors.Is(err, storage.ErrNotFound) { - return nil, nil - } - - return nil, err - } - - return value, nil -} - // IndexBlockData indexes all execution block data by height. // This method shouldn't be used concurrently. 
// Expected error returns during normal operations: diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go index b4919bbd8eb..f4961439127 100644 --- a/module/state_synchronization/indexer/indexer_core_test.go +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -57,7 +57,6 @@ type indexCoreTest struct { firstHeightStore func(t *testing.T) uint64 registersStore func(t *testing.T, entries flow.RegisterEntries, height uint64) error eventsStore func(t *testing.T, ID flow.Identifier, events []flow.EventsList) error - registersGet func(t *testing.T, IDs flow.RegisterID, height uint64) (flow.RegisterValue, error) } func newIndexCoreTest( @@ -166,15 +165,6 @@ func (i *indexCoreTest) setStoreTransactionResults(f func(*testing.T, flow.Ident return i } -func (i *indexCoreTest) setGetRegisters(f func(t *testing.T, ID flow.RegisterID, height uint64) (flow.RegisterValue, error)) *indexCoreTest { - i.registers. - On("Get", mock.AnythingOfType("flow.RegisterID"), mock.AnythingOfType("uint64")). - Return(func(IDs flow.RegisterID, height uint64) (flow.RegisterValue, error) { - return f(i.t, IDs, height) - }) - return i -} - func (i *indexCoreTest) useDefaultEvents() *indexCoreTest { i.events. 
On("BatchStore", @@ -257,11 +247,6 @@ func (i *indexCoreTest) runIndexBlockData() error { return i.indexer.IndexBlockData(i.data) } -func (i *indexCoreTest) runGetRegister(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { - i.initIndexer() - return i.indexer.RegisterValue(ID, height) -} - func TestExecutionState_IndexBlockData(t *testing.T) { g := fixtures.NewGeneratorSuite() blocks := g.Blocks().List(4) @@ -557,29 +542,6 @@ func TestExecutionState_IndexBlockData(t *testing.T) { }) } -func TestExecutionState_RegisterValues(t *testing.T) { - g := fixtures.NewGeneratorSuite() - t.Run("Get value for single register", func(t *testing.T) { - blocks := g.Blocks().List(5) - height := blocks[1].Height - id := flow.RegisterID{ - Owner: "1", - Key: "2", - } - val := flow.RegisterValue("0x1") - - values, err := newIndexCoreTest(t, g, blocks, nil). - initIndexer(). - setGetRegisters(func(t *testing.T, ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { - return val, nil - }). 
- runGetRegister(id, height) - - assert.NoError(t, err) - assert.Equal(t, values, val) - }) -} - func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { blocksByID := make(map[flow.Identifier]*flow.Block, 0) for _, b := range blocks { @@ -591,10 +553,6 @@ func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { func TestIndexerIntegration_StoreAndGet(t *testing.T) { lockManager := storage.NewTestingLockManager() - regOwnerAddress := unittest.RandomAddressFixture() - regOwner := string(regOwnerAddress.Bytes()) - regKey := "code" - registerID := flow.NewRegisterID(regOwnerAddress, regKey) pdb, dbDir := unittest.TempPebbleDB(t) t.Cleanup(func() { @@ -607,120 +565,6 @@ func TestIndexerIntegration_StoreAndGet(t *testing.T) { derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) require.NoError(t, err) - // this test makes sure index values for a single register are correctly updated and always last value is returned - t.Run("Single Index Value Changes", func(t *testing.T) { - pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { - index := New( - logger, - module.ExecutionStateIndexerMetrics(metrics), - pebbleimpl.ToDB(pdb), - registers, - nil, - nil, - nil, - nil, - nil, - nil, - flow.Testnet, - derivedChainData, - collectionsmock.NewCollectionIndexer(t), - nil, - lockManager, - nil, // accountTxIndex - ) - - values := [][]byte{[]byte("1"), []byte("1"), []byte("2"), []byte("3"), []byte("4")} - for i, val := range values { - testDesc := fmt.Sprintf("test iteration number %d failed with test value %s", i, val) - height := uint64(i + 1) - err := storeRegisterWithValue(index, height, regOwner, regKey, val) - require.NoError(t, err) - - results, err := index.RegisterValue(registerID, height) - require.NoError(t, err, testDesc) - assert.Equal(t, val, results) - } - }) - }) - - // this test makes sure if a register is not found the value returned is nil and without 
an error in order for this to be - // up to the specification script executor requires - t.Run("Missing Register", func(t *testing.T) { - pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { - index := New( - logger, - module.ExecutionStateIndexerMetrics(metrics), - pebbleimpl.ToDB(pdb), - registers, - nil, - nil, - nil, - nil, - nil, - nil, - flow.Testnet, - derivedChainData, - collectionsmock.NewCollectionIndexer(t), - nil, - lockManager, - nil, // accountTxIndex - ) - - value, err := index.RegisterValue(registerID, 0) - require.Nil(t, value) - assert.NoError(t, err) - }) - }) - - // this test makes sure that even if indexed values for a single register are requested with higher height - // the correct highest height indexed value is returned. - // e.g. we index A{h(1) -> X}, A{h(2) -> Y}, when we request h(4) we get value Y - t.Run("Single Index Value At Later Heights", func(t *testing.T) { - pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { - index := New( - logger, - module.ExecutionStateIndexerMetrics(metrics), - pebbleimpl.ToDB(pdb), - registers, - nil, - nil, - nil, - nil, - nil, - nil, - flow.Testnet, - derivedChainData, - collectionsmock.NewCollectionIndexer(t), - nil, - lockManager, - nil, // accountTxIndex - ) - - storeValues := [][]byte{[]byte("1"), []byte("2")} - - require.NoError(t, storeRegisterWithValue(index, 1, regOwner, regKey, storeValues[0])) - - require.NoError(t, index.indexRegisters(nil, 2)) - - value, err := index.RegisterValue(registerID, uint64(2)) - require.NoError(t, err) - assert.Equal(t, storeValues[0], value) - - require.NoError(t, index.indexRegisters(nil, 3)) - - err = storeRegisterWithValue(index, 4, regOwner, regKey, storeValues[1]) - require.NoError(t, err) - - value, err = index.RegisterValue(registerID, uint64(4)) - require.NoError(t, err) - assert.Equal(t, storeValues[1], value) - - value, err = 
index.RegisterValue(registerID, uint64(3)) - require.NoError(t, err) - assert.Equal(t, storeValues[0], value) - }) - }) - // this test makes sure we correctly handle weird payloads t.Run("Empty and Nil Payloads", func(t *testing.T) { pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { @@ -813,12 +657,6 @@ func TestCollectScheduledTransactions(t *testing.T) { }) } -// helper to store register at height and increment index range -func storeRegisterWithValue(indexer *IndexerCore, height uint64, owner string, key string, value []byte) error { - payload := LedgerPayloadFixture(owner, key, value) - return indexer.indexRegisters(map[ledger.Path]*ledger.Payload{ledger.DummyPath: payload}, height) -} - // newMockStateForBlock returns a mock protocol.State configured so that // AtBlockID returns the block's header. func newMockStateForBlock(t *testing.T, block *flow.Block) *protocolmock.State { From 4224be4d680a7398c407bd69f538c4059ab73766 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 06:54:06 -0800 Subject: [PATCH 03/18] allow optionals in script response, but require the value is set --- .../scheduled_transaction_requester.go | 11 ++++-- .../scheduled_transaction_requester_test.go | 35 +++++++++++++++++++ .../indexer/extended/test_helpers_test.go | 25 +++++++++++-- 3 files changed, 66 insertions(+), 5 deletions(-) diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go index b46a56f87ce..ad52ea8f13d 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go @@ -119,8 +119,15 @@ func (r *ScheduledTransactionRequester) fetchMissingTxs( return nil, fmt.Errorf("expected Array result, got %T", results) } - for _, result 
:= range array.Values { - decoded, err := decodeTransactionData(result) + for i, result := range array.Values { + opt, ok := result.(cadence.Optional) + if !ok { + return nil, fmt.Errorf("expected Optional at index %d, got %T", i, result) + } + if opt.Value == nil { + return nil, fmt.Errorf("scheduled transaction %d had event, but is not found on-chain", batch[i]) + } + decoded, err := decodeTransactionData(opt.Value) if err != nil { return nil, fmt.Errorf("failed to decode scheduled transaction: %w", err) } diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go index 73a0517f4c8..9417fb8f600 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go @@ -126,6 +126,41 @@ func TestScheduledTransactionRequester_FailedEntry(t *testing.T) { assert.Equal(t, executorTxID, txs[0].ExecutedTransactionID) } +// TestScheduledTransactionRequester_NilOptional verifies that when the script returns a nil +// optional for an ID (transaction not found on-chain), Fetch returns an error. +func TestScheduledTransactionRequester_NilOptional(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(flow.Testnet) + owner := unittest.RandomAddressFixture() + executorMock := executionmock.NewScriptExecutor(t) + requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) + + // ID 10 exists on-chain; ID 11 does not (nil optional). 
+ comp := MakeTransactionDataComposite(sc, 10, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 10) + response := MakeJITScriptResponseWithNils( + t, + []cadence.Composite{comp, comp}, // second entry is a nil optional; value is ignored + []bool{false, true}, + ) + executorMock.On("ExecuteAtBlockHeight", + mock.Anything, + getTransactionDataScript(flow.Testnet), + encodeUInt64Args(t, 10, 11), + requesterTestHeight, + ).Return(response, nil).Once() + + data := &scheduledTransactionData{ + executedEntries: []executedEntry{ + {event: &events.TransactionSchedulerExecutedEvent{ID: 10}, transactionID: unittest.IdentifierFixture()}, + {event: &events.TransactionSchedulerExecutedEvent{ID: 11}, transactionID: unittest.IdentifierFixture()}, + }, + } + _, err := requester.Fetch(context.Background(), []uint64{10, 11}, requesterTestHeight, data) + require.Error(t, err) + require.ErrorContains(t, err, "is not found on-chain") +} + // TestScheduledTransactionRequester_ScriptError verifies that an error from the script // executor is propagated from Fetch. func TestScheduledTransactionRequester_ScriptError(t *testing.T) { diff --git a/module/state_synchronization/indexer/extended/test_helpers_test.go b/module/state_synchronization/indexer/extended/test_helpers_test.go index 1ba5129f28a..12408fee3d6 100644 --- a/module/state_synchronization/indexer/extended/test_helpers_test.go +++ b/module/state_synchronization/indexer/extended/test_helpers_test.go @@ -57,13 +57,32 @@ func MakeTransactionDataComposite( }).WithType(typ) } -// MakeJITScriptResponse encodes a slice of TransactionData composites as a JSON-CDC array, -// the format returned by a getTransactionData script execution. +// MakeJITScriptResponse encodes a slice of TransactionData composites as a JSON-CDC array +// of optionals, matching the [FlowTransactionScheduler.TransactionData?] return type of the +// getTransactionData script. 
func MakeJITScriptResponse(t *testing.T, composites ...cadence.Composite) []byte { t.Helper() values := make([]cadence.Value, len(composites)) for i, c := range composites { - values[i] = c + values[i] = cadence.NewOptional(c) + } + encoded, err := jsoncdc.Encode(cadence.NewArray(values)) + require.NoError(t, err) + return encoded +} + +// MakeJITScriptResponseWithNils encodes a mix of TransactionData composites and nil optionals +// as a JSON-CDC array of optionals. nils[i] == true means that slot is a nil optional. +func MakeJITScriptResponseWithNils(t *testing.T, composites []cadence.Composite, nils []bool) []byte { + t.Helper() + require.Equal(t, len(composites), len(nils), "composites and nils must have the same length") + values := make([]cadence.Value, len(composites)) + for i, c := range composites { + if nils[i] { + values[i] = cadence.NewOptional(nil) + } else { + values[i] = cadence.NewOptional(c) + } } encoded, err := jsoncdc.Encode(cadence.NewArray(values)) require.NoError(t, err) From b59bbf8228e80eee630b2af11b1ba4cc52321bb2 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 07:36:55 -0800 Subject: [PATCH 04/18] fix script result parsing --- .../indexer/extended/events/helpers.go | 17 +++++++++++++++++ .../extended/scheduled_transaction_data.go | 9 +++++++-- .../extended/scheduled_transaction_data_test.go | 14 ++++++++++++-- .../extended/scheduled_transaction_requester.go | 6 +++--- .../scheduled_transaction_requester_test.go | 14 +++++++------- 5 files changed, 46 insertions(+), 14 deletions(-) diff --git a/module/state_synchronization/indexer/extended/events/helpers.go b/module/state_synchronization/indexer/extended/events/helpers.go index 801a2207cfc..6d90d9db695 100644 --- a/module/state_synchronization/indexer/extended/events/helpers.go +++ b/module/state_synchronization/indexer/extended/events/helpers.go @@ -60,6 +60,23 @@ func PathFromOptional(opt cadence.Optional) (string, error) { 
return path.String(), nil } +// EnumToType extracts the raw value from a Cadence enum and converts it to T. +// Cadence enums encode their raw value in a field named "rawValue". +// +// Any error indicates that the enum is malformed or the rawValue is not convertible to T. +func EnumToType[T any](enum cadence.Enum) (T, error) { + var zero T + raw := enum.SearchFieldByName("rawValue") + if raw == nil { + return zero, fmt.Errorf("enum has no rawValue field") + } + v, ok := raw.(T) + if !ok { + return zero, fmt.Errorf("expected %T rawValue, got %T", zero, raw) + } + return v, nil +} + // HexToEVMAddress decodes a hex string to an EVM address. // This is the same logic as `common.HexToAddress`, except it returns an error if the hex string is // not valid hex or an incorrect length. diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go index fd90c8580cc..073c814fd3c 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_data.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go @@ -101,7 +101,7 @@ func DecodeTransactionDataResults(response []byte, ids []uint64) (map[uint64]*ac func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, error) { type transactionDataRaw struct { ID uint64 `cadence:"id"` - Priority uint8 `cadence:"priority"` + Priority cadence.Enum `cadence:"priority"` Timestamp cadence.UFix64 `cadence:"timestamp"` ExecutionEffort uint64 `cadence:"executionEffort"` Fees cadence.UFix64 `cadence:"fees"` @@ -121,6 +121,11 @@ func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, er return access.ScheduledTransaction{}, fmt.Errorf("failed to decode TransactionData fields: %w", err) } + priorityRaw, err := events.EnumToType[cadence.UInt8](raw.Priority) + if err != nil { + return access.ScheduledTransaction{}, fmt.Errorf("failed to decode TransactionData 
'priority' field: %w", err) + } + publicPath, err := events.PathFromOptional(raw.TransactionHandlerPublicPath) if err != nil { return access.ScheduledTransaction{}, fmt.Errorf("failed to decode 'transactionHandlerPublicPath' field: %w", err) @@ -128,7 +133,7 @@ func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, er return access.ScheduledTransaction{ ID: raw.ID, - Priority: access.ScheduledTransactionPriority(raw.Priority), + Priority: access.ScheduledTransactionPriority(priorityRaw), Timestamp: uint64(raw.Timestamp), ExecutionEffort: raw.ExecutionEffort, Fees: uint64(raw.Fees), diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go index f42a364c913..d01dbdd39b1 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go @@ -213,12 +213,22 @@ func makeDecodeTransactionDataOptional( ) cadence.Value { addr := common.Address(sc.FlowTransactionScheduler.Address) loc := common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) + + priorityEnumType := cadence.NewEnumType( + loc, + "Priority", + cadence.UInt8Type, + []cadence.Field{{Identifier: "rawValue", Type: cadence.UInt8Type}}, + nil, + ) + priorityEnum := cadence.NewEnum([]cadence.Value{cadence.UInt8(priority)}).WithType(priorityEnumType) + typ := cadence.NewStructType( loc, "TransactionData", []cadence.Field{ {Identifier: "id", Type: cadence.UInt64Type}, - {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "priority", Type: priorityEnumType}, {Identifier: "timestamp", Type: cadence.UFix64Type}, {Identifier: "executionEffort", Type: cadence.UInt64Type}, {Identifier: "fees", Type: cadence.UFix64Type}, @@ -231,7 +241,7 @@ func makeDecodeTransactionDataOptional( ) comp := cadence.NewStruct([]cadence.Value{ cadence.UInt64(id), 
- cadence.UInt8(priority), + priorityEnum, cadence.UFix64(timestamp), cadence.UInt64(executionEffort), cadence.UFix64(fees), diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go index ad52ea8f13d..4810a7e0d17 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go @@ -34,7 +34,7 @@ type ScheduledTransactionRequester struct { func NewScheduledTransactionRequester(executor scriptExecutor, chainID flow.ChainID) *ScheduledTransactionRequester { return &ScheduledTransactionRequester{ executor: executor, - script: getTransactionDataScript(chainID), + script: GetTransactionDataScript(chainID), } } @@ -138,9 +138,9 @@ func (r *ScheduledTransactionRequester) fetchMissingTxs( return missingTxs, nil } -// getTransactionDataScript returns the Cadence script used for JIT scheduled transaction +// GetTransactionDataScript returns the Cadence script used for JIT scheduled transaction // lookups on the given chain. Exposed for testing. 
-func getTransactionDataScript(chainID flow.ChainID) []byte { +func GetTransactionDataScript(chainID flow.ChainID) []byte { sc := systemcontracts.SystemContractsForChain(chainID) return []byte(fmt.Sprintf(getTransactionDataScriptTemplate, sc.FlowTransactionScheduler.Address.Hex())) } diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go index 9417fb8f600..bf597de260f 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go @@ -35,7 +35,7 @@ func TestScheduledTransactionRequester_ExecutedEntry(t *testing.T) { comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 99) executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 5), requesterTestHeight, ).Return(MakeJITScriptResponse(t, comp), nil).Once() @@ -71,7 +71,7 @@ func TestScheduledTransactionRequester_CancelledEntry(t *testing.T) { comp := MakeTransactionDataComposite(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler", 77) executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 7), requesterTestHeight, ).Return(MakeJITScriptResponse(t, comp), nil).Once() @@ -108,7 +108,7 @@ func TestScheduledTransactionRequester_FailedEntry(t *testing.T) { comp := MakeTransactionDataComposite(sc, 42, 1, 3000, 200, 80, owner, "A.xyz.Contract.Handler", 15) executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 42), requesterTestHeight, ).Return(MakeJITScriptResponse(t, comp), nil).Once() @@ -145,7 +145,7 @@ func 
TestScheduledTransactionRequester_NilOptional(t *testing.T) { ) executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 10, 11), requesterTestHeight, ).Return(response, nil).Once() @@ -172,7 +172,7 @@ func TestScheduledTransactionRequester_ScriptError(t *testing.T) { scriptErr := fmt.Errorf("script execution failed") executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 9), requesterTestHeight, ).Return([]byte(nil), scriptErr).Once() @@ -212,13 +212,13 @@ func TestScheduledTransactionRequester_Batching(t *testing.T) { } executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, batch1IDs...), requesterTestHeight, ).Return(MakeJITScriptResponse(t, batch1Composites...), nil).Once() executorMock.On("ExecuteAtBlockHeight", mock.Anything, - getTransactionDataScript(flow.Testnet), + GetTransactionDataScript(flow.Testnet), encodeUInt64Args(t, 51), requesterTestHeight, ).Return(MakeJITScriptResponse(t, batch2Composite), nil).Once() From ececd6ea587078dfc6f1b39ad437a59c1fd6c187 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 08:59:33 -0800 Subject: [PATCH 05/18] fix schedule tx naming in script response --- model/access/scheduled_transaction.go | 3 +- .../extended/scheduled_transaction_data.go | 23 +++++---------- .../scheduled_transaction_data_test.go | 28 +++++++----------- .../scheduled_transaction_requester.go | 2 +- .../scheduled_transaction_requester_test.go | 13 ++++----- .../extended/scheduled_transactions_test.go | 2 +- .../indexer/extended/test_helpers_test.go | 29 +++++++++++-------- 7 files changed, 44 insertions(+), 56 deletions(-) diff --git a/model/access/scheduled_transaction.go 
b/model/access/scheduled_transaction.go index bdfdd0ce38f..231958f9904 100644 --- a/model/access/scheduled_transaction.go +++ b/model/access/scheduled_transaction.go @@ -114,7 +114,8 @@ type ScheduledTransaction struct { // IsPlaceholder is true if the scheduled transaction was created based on the current chain state, // and not based on a protocol event. This happens when the index is bootstrapped after the original // transaction where the scheduled transaction was first created. - // When true, the `CreatedTransactionID` field is undefined. + // When true, the `CreatedTransactionID`, `TransactionHandlerUUID`, and `TransactionHandlerPublicPath` + // fields are undefined. IsPlaceholder bool // Expansion fields populated when expandResults is true. Never persisted. diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go index 073c814fd3c..1acc415bb8d 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_data.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_data.go @@ -100,15 +100,13 @@ func DecodeTransactionDataResults(response []byte, ids []uint64) (map[uint64]*ac // Any error indicates that the value is malformed. 
func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, error) { type transactionDataRaw struct { - ID uint64 `cadence:"id"` - Priority cadence.Enum `cadence:"priority"` - Timestamp cadence.UFix64 `cadence:"timestamp"` - ExecutionEffort uint64 `cadence:"executionEffort"` - Fees cadence.UFix64 `cadence:"fees"` - TransactionHandlerOwner cadence.Address `cadence:"transactionHandlerOwner"` - TransactionHandlerTypeIdentifier string `cadence:"transactionHandlerTypeIdentifier"` - TransactionHandlerUUID uint64 `cadence:"transactionHandlerUUID"` - TransactionHandlerPublicPath cadence.Optional `cadence:"transactionHandlerPublicPath"` + ID uint64 `cadence:"id"` + Priority cadence.Enum `cadence:"priority"` + Timestamp cadence.UFix64 `cadence:"scheduledTimestamp"` + ExecutionEffort uint64 `cadence:"executionEffort"` + Fees cadence.UFix64 `cadence:"fees"` + TransactionHandlerOwner cadence.Address `cadence:"handlerAddress"` + TransactionHandlerTypeIdentifier string `cadence:"handlerTypeIdentifier"` } composite, ok := value.(cadence.Composite) @@ -126,11 +124,6 @@ func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, er return access.ScheduledTransaction{}, fmt.Errorf("failed to decode TransactionData 'priority' field: %w", err) } - publicPath, err := events.PathFromOptional(raw.TransactionHandlerPublicPath) - if err != nil { - return access.ScheduledTransaction{}, fmt.Errorf("failed to decode 'transactionHandlerPublicPath' field: %w", err) - } - return access.ScheduledTransaction{ ID: raw.ID, Priority: access.ScheduledTransactionPriority(priorityRaw), @@ -139,8 +132,6 @@ func decodeTransactionData(value cadence.Value) (access.ScheduledTransaction, er Fees: uint64(raw.Fees), TransactionHandlerOwner: flow.Address(raw.TransactionHandlerOwner), TransactionHandlerTypeIdentifier: raw.TransactionHandlerTypeIdentifier, - TransactionHandlerUUID: raw.TransactionHandlerUUID, - TransactionHandlerPublicPath: publicPath, Status: 
access.ScheduledTxStatusScheduled, }, nil } diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go index d01dbdd39b1..9678367e418 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_data_test.go @@ -70,8 +70,8 @@ func TestDecodeTransactionDataResults_AllFound(t *testing.T) { owner := unittest.RandomAddressFixture() ids := []uint64{5, 7} - comp5 := makeDecodeTransactionDataOptional(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 55) - comp7 := makeDecodeTransactionDataOptional(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler", 77) + comp5 := makeDecodeTransactionDataOptional(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler") + comp7 := makeDecodeTransactionDataOptional(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler") response := encodeOptionalArray(t, comp5, comp7) @@ -83,12 +83,9 @@ func TestDecodeTransactionDataResults_AllFound(t *testing.T) { require.True(t, ok) assert.Equal(t, uint64(5), tx5.ID) assert.Equal(t, access.ScheduledTxStatusScheduled, tx5.Status) - assert.Equal(t, uint64(55), tx5.TransactionHandlerUUID) - tx7, ok := results[7] require.True(t, ok) assert.Equal(t, uint64(7), tx7.ID) - assert.Equal(t, uint64(77), tx7.TransactionHandlerUUID) } // TestDecodeTransactionDataResults_SomeNil verifies that nil Optional elements are omitted @@ -100,9 +97,9 @@ func TestDecodeTransactionDataResults_SomeNil(t *testing.T) { owner := unittest.RandomAddressFixture() ids := []uint64{1, 2, 3} - comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1) + comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler") nilOpt := cadence.NewOptional(nil) - comp3 := makeDecodeTransactionDataOptional(sc, 3, 1, 1000, 100, 50, 
owner, "A.abc.Contract.Handler", 3) + comp3 := makeDecodeTransactionDataOptional(sc, 3, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler") response := encodeOptionalArray(t, comp1, nilOpt, comp3) @@ -129,7 +126,7 @@ func TestDecodeTransactionDataResults_WrongCount(t *testing.T) { ids := []uint64{1, 2} // Only one element in the response instead of two. - comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 1) + comp1 := makeDecodeTransactionDataOptional(sc, 1, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler") response := encodeOptionalArray(t, comp1) _, err := DecodeTransactionDataResults(response, ids) @@ -204,12 +201,11 @@ func makeDecodeTransactionDataOptional( sc *systemcontracts.SystemContracts, id uint64, priority uint8, - timestamp uint64, + scheduledTimestamp uint64, executionEffort uint64, fees uint64, owner flow.Address, typeIdentifier string, - uuid uint64, ) cadence.Value { addr := common.Address(sc.FlowTransactionScheduler.Address) loc := common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) @@ -229,26 +225,22 @@ func makeDecodeTransactionDataOptional( []cadence.Field{ {Identifier: "id", Type: cadence.UInt64Type}, {Identifier: "priority", Type: priorityEnumType}, - {Identifier: "timestamp", Type: cadence.UFix64Type}, + {Identifier: "scheduledTimestamp", Type: cadence.UFix64Type}, {Identifier: "executionEffort", Type: cadence.UInt64Type}, {Identifier: "fees", Type: cadence.UFix64Type}, - {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, - {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, - {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, - {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + {Identifier: "handlerAddress", Type: cadence.AddressType}, + {Identifier: "handlerTypeIdentifier", Type: cadence.StringType}, }, nil, ) comp := cadence.NewStruct([]cadence.Value{ 
cadence.UInt64(id), priorityEnum, - cadence.UFix64(timestamp), + cadence.UFix64(scheduledTimestamp), cadence.UInt64(executionEffort), cadence.UFix64(fees), cadence.NewAddress(owner), cadence.String(typeIdentifier), - cadence.UInt64(uuid), - cadence.NewOptional(nil), }).WithType(typ) return cadence.NewOptional(comp) } diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go index 4810a7e0d17..b150465e996 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go @@ -129,7 +129,7 @@ func (r *ScheduledTransactionRequester) fetchMissingTxs( } decoded, err := decodeTransactionData(opt.Value) if err != nil { - return nil, fmt.Errorf("failed to decode scheduled transaction: %w", err) + return nil, fmt.Errorf("failed to decode scheduled transaction %d: %w", batch[i], err) } missingTxs[decoded.ID] = decoded } diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go index bf597de260f..9319597d213 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester_test.go @@ -32,7 +32,7 @@ func TestScheduledTransactionRequester_ExecutedEntry(t *testing.T) { requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) executedTxID := unittest.IdentifierFixture() - comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 99) + comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler") executorMock.On("ExecuteAtBlockHeight", mock.Anything, GetTransactionDataScript(flow.Testnet), @@ -54,7 +54,6 @@ func 
TestScheduledTransactionRequester_ExecutedEntry(t *testing.T) { assert.Equal(t, uint64(5), txs[0].ID) assert.Equal(t, access.ScheduledTxStatusExecuted, txs[0].Status) assert.Equal(t, executedTxID, txs[0].ExecutedTransactionID) - assert.Equal(t, uint64(99), txs[0].TransactionHandlerUUID) } // TestScheduledTransactionRequester_CancelledEntry verifies that Fetch correctly applies @@ -68,7 +67,7 @@ func TestScheduledTransactionRequester_CancelledEntry(t *testing.T) { requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) cancelTxID := unittest.IdentifierFixture() - comp := MakeTransactionDataComposite(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler", 77) + comp := MakeTransactionDataComposite(sc, 7, 2, 2000, 400, 150, owner, "A.def.Contract.Handler") executorMock.On("ExecuteAtBlockHeight", mock.Anything, GetTransactionDataScript(flow.Testnet), @@ -105,7 +104,7 @@ func TestScheduledTransactionRequester_FailedEntry(t *testing.T) { requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) executorTxID := unittest.IdentifierFixture() - comp := MakeTransactionDataComposite(sc, 42, 1, 3000, 200, 80, owner, "A.xyz.Contract.Handler", 15) + comp := MakeTransactionDataComposite(sc, 42, 1, 3000, 200, 80, owner, "A.xyz.Contract.Handler") executorMock.On("ExecuteAtBlockHeight", mock.Anything, GetTransactionDataScript(flow.Testnet), @@ -137,7 +136,7 @@ func TestScheduledTransactionRequester_NilOptional(t *testing.T) { requester := NewScheduledTransactionRequester(executorMock, flow.Testnet) // ID 10 exists on-chain; ID 11 does not (nil optional). 
- comp := MakeTransactionDataComposite(sc, 10, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 10) + comp := MakeTransactionDataComposite(sc, 10, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler") response := MakeJITScriptResponseWithNils( t, []cadence.Composite{comp, comp}, // second entry is a nil optional; value is ignored @@ -202,9 +201,9 @@ func TestScheduledTransactionRequester_Batching(t *testing.T) { var batch1Composites []cadence.Composite for i := range 50 { - batch1Composites = append(batch1Composites, MakeTransactionDataComposite(sc, uint64(i+1), 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", uint64(i+1))) + batch1Composites = append(batch1Composites, MakeTransactionDataComposite(sc, uint64(i+1), 1, 1000, 100, 50, owner, "A.abc.Contract.Handler")) } - batch2Composite := MakeTransactionDataComposite(sc, 51, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler", 51) + batch2Composite := MakeTransactionDataComposite(sc, 51, 1, 1000, 100, 50, owner, "A.abc.Contract.Handler") batch1IDs := make([]uint64, 50) for i := range 50 { diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go index 5cafa8c5687..8efda819482 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go @@ -747,7 +747,7 @@ func TestScheduledTransactionsIndexer_JITLookup(t *testing.T) { executedEvt.TransactionID = executedTxID scriptHeight := header.Height - 1 - comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler", 99) + comp := MakeTransactionDataComposite(sc, 5, 1, 1000, 300, 100, owner, "A.abc.Contract.Handler") scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, scriptHeight, ).Return(MakeJITScriptResponse(t, comp), nil).Once() diff --git 
a/module/state_synchronization/indexer/extended/test_helpers_test.go b/module/state_synchronization/indexer/extended/test_helpers_test.go index 12408fee3d6..a15b9272bf9 100644 --- a/module/state_synchronization/indexer/extended/test_helpers_test.go +++ b/module/state_synchronization/indexer/extended/test_helpers_test.go @@ -19,41 +19,46 @@ func MakeTransactionDataComposite( sc *systemcontracts.SystemContracts, id uint64, priority uint8, - timestamp uint64, + scheduledTimestamp uint64, executionEffort uint64, fees uint64, owner flow.Address, typeIdentifier string, - uuid uint64, ) cadence.Composite { addr := common.Address(sc.FlowTransactionScheduler.Address) loc := common.NewAddressLocation(nil, addr, sc.FlowTransactionScheduler.Name) + + priorityEnumType := cadence.NewEnumType( + loc, + "Priority", + cadence.UInt8Type, + []cadence.Field{{Identifier: "rawValue", Type: cadence.UInt8Type}}, + nil, + ) + priorityEnum := cadence.NewEnum([]cadence.Value{cadence.UInt8(priority)}).WithType(priorityEnumType) + typ := cadence.NewStructType( loc, "TransactionData", []cadence.Field{ {Identifier: "id", Type: cadence.UInt64Type}, - {Identifier: "priority", Type: cadence.UInt8Type}, - {Identifier: "timestamp", Type: cadence.UFix64Type}, + {Identifier: "priority", Type: priorityEnumType}, + {Identifier: "scheduledTimestamp", Type: cadence.UFix64Type}, {Identifier: "executionEffort", Type: cadence.UInt64Type}, {Identifier: "fees", Type: cadence.UFix64Type}, - {Identifier: "transactionHandlerOwner", Type: cadence.AddressType}, - {Identifier: "transactionHandlerTypeIdentifier", Type: cadence.StringType}, - {Identifier: "transactionHandlerUUID", Type: cadence.UInt64Type}, - {Identifier: "transactionHandlerPublicPath", Type: cadence.NewOptionalType(cadence.PublicPathType)}, + {Identifier: "handlerAddress", Type: cadence.AddressType}, + {Identifier: "handlerTypeIdentifier", Type: cadence.StringType}, }, nil, ) return cadence.NewStruct([]cadence.Value{ cadence.UInt64(id), - 
cadence.UInt8(priority), - cadence.UFix64(timestamp), + priorityEnum, + cadence.UFix64(scheduledTimestamp), cadence.UInt64(executionEffort), cadence.UFix64(fees), cadence.NewAddress(owner), cadence.String(typeIdentifier), - cadence.UInt64(uuid), - cadence.NewOptional(nil), }).WithType(typ) } From a8e04d5b13a1e9c65490c977f95a2917c953d95d Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 09:54:49 -0800 Subject: [PATCH 06/18] add metrics --- module/metrics.go | 8 +++ module/metrics/extended_indexing.go | 31 ++++++++++- module/metrics/noop.go | 3 +- module/mock/extended_indexing_metrics.go | 54 +++++++++++++++++++ .../indexer/extended/bootstrap/bootstrap.go | 5 +- .../extended/scheduled_transactions.go | 16 +++++- .../extended/scheduled_transactions_test.go | 11 ++-- 7 files changed, 117 insertions(+), 11 deletions(-) diff --git a/module/metrics.go b/module/metrics.go index 2afe591d9ac..12a2c526009 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -829,6 +829,14 @@ type ExecutionStateIndexerMetrics interface { type ExtendedIndexingMetrics interface { // BlockIndexedExtended records the latest processed height for a given extended indexer. BlockIndexedExtended(indexer string, height uint64) + + // ScheduledTransactionIndexed records counts of scheduled transactions processed in a single block. + // scheduled is the number of newly created scheduled transactions. + // executed is the number marked as executed. + // failed is the number marked as failed. + // canceled is the number marked as canceled. + // backfilled is the number fetched from state because they were unknown to the local index. 
+ ScheduledTransactionIndexed(scheduled, executed, failed, canceled, backfilled int) } type TransactionErrorMessagesMetrics interface { diff --git a/module/metrics/extended_indexing.go b/module/metrics/extended_indexing.go index 10be9ed35cb..5deb1dfab32 100644 --- a/module/metrics/extended_indexing.go +++ b/module/metrics/extended_indexing.go @@ -10,7 +10,9 @@ import ( var _ module.ExtendedIndexingMetrics = (*ExtendedIndexingCollector)(nil) type ExtendedIndexingCollector struct { - indexedHeight *prometheus.GaugeVec + indexedHeight *prometheus.GaugeVec + scheduledTxCount *prometheus.CounterVec + scheduledTxBackfillCount prometheus.Counter } func NewExtendedIndexingCollector() module.ExtendedIndexingMetrics { @@ -21,8 +23,24 @@ func NewExtendedIndexingCollector() module.ExtendedIndexingMetrics { Help: "latest processed height for extended indexers", }, []string{"indexer"}) + scheduledTxCount := promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExtendedIndexing, + Name: "scheduledtx_total", + Help: "total number of scheduled transactions processed, by status", + }, []string{"status"}) + + scheduledTxBackfillCount := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExtendedIndexing, + Name: "scheduledtx_backfilled_total", + Help: "total number of scheduled transactions backfilled from state", + }) + return &ExtendedIndexingCollector{ - indexedHeight: indexedHeight, + indexedHeight: indexedHeight, + scheduledTxCount: scheduledTxCount, + scheduledTxBackfillCount: scheduledTxBackfillCount, } } @@ -30,3 +48,12 @@ func NewExtendedIndexingCollector() module.ExtendedIndexingMetrics { func (c *ExtendedIndexingCollector) BlockIndexedExtended(indexer string, height uint64) { c.indexedHeight.WithLabelValues(indexer).Set(float64(height)) } + +// ScheduledTransactionIndexed records counts of scheduled transactions processed in a single block. 
+func (c *ExtendedIndexingCollector) ScheduledTransactionIndexed(scheduled, executed, failed, canceled, backfilled int) { + c.scheduledTxCount.WithLabelValues("scheduled").Add(float64(scheduled)) + c.scheduledTxCount.WithLabelValues("executed").Add(float64(executed)) + c.scheduledTxCount.WithLabelValues("failed").Add(float64(failed)) + c.scheduledTxCount.WithLabelValues("canceled").Add(float64(canceled)) + c.scheduledTxBackfillCount.Add(float64(backfilled)) +} diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 625f18d4a7e..cc0642bc4a1 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -379,7 +379,8 @@ func (nc *NoopCollector) InitializeLatestHeight(height uint64) {} var _ module.ExtendedIndexingMetrics = (*NoopCollector)(nil) -func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} +func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} +func (nc *NoopCollector) ScheduledTransactionIndexed(int, int, int, int, int) {} var _ module.TransactionErrorMessagesMetrics = (*NoopCollector)(nil) diff --git a/module/mock/extended_indexing_metrics.go b/module/mock/extended_indexing_metrics.go index 4c0377fbb79..3db38332b03 100644 --- a/module/mock/extended_indexing_metrics.go +++ b/module/mock/extended_indexing_metrics.go @@ -80,3 +80,57 @@ func (_c *ExtendedIndexingMetrics_BlockIndexedExtended_Call) RunAndReturn(run fu _c.Run(run) return _c } + +// ScheduledTransactionIndexed provides a mock function for the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) ScheduledTransactionIndexed(scheduled int, executed int, failed int, canceled int, backfilled int) { + _mock.Called(scheduled, executed, failed, canceled, backfilled) + return +} + +// ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScheduledTransactionIndexed' +type ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call struct { + *mock.Call +} + +// 
ScheduledTransactionIndexed is a helper method to define mock.On call +// - scheduled int +// - executed int +// - failed int +// - canceled int +// - backfilled int +func (_e *ExtendedIndexingMetrics_Expecter) ScheduledTransactionIndexed(scheduled interface{}, executed interface{}, failed interface{}, canceled interface{}, backfilled interface{}) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { + return &ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call{Call: _e.mock.On("ScheduledTransactionIndexed", scheduled, executed, failed, canceled, backfilled)} +} + +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Run(run func(scheduled int, executed int, failed int, canceled int, backfilled int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0, arg1, arg2, arg3, arg4 int + if args[0] != nil { + arg0 = args[0].(int) + } + if args[1] != nil { + arg1 = args[1].(int) + } + if args[2] != nil { + arg2 = args[2].(int) + } + if args[3] != nil { + arg3 = args[3].(int) + } + if args[4] != nil { + arg4 = args[4].(int) + } + run(arg0, arg1, arg2, arg3, arg4) + }) + return _c +} + +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Return() *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { + _c.Call.Return() + return _c +} + +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) RunAndReturn(run func(int, int, int, int, int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { + _c.Run(run) + return _c +} diff --git a/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go index 774bf1a8932..062f909b2ef 100644 --- a/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go +++ b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go @@ -124,10 +124,13 @@ func BootstrapIndexers( 
extendedStorage.NonFungibleTokenTransfersBootstrapper, ) + extendedMetrics := metrics.NewExtendedIndexingCollector() + scheduledTransactions := extended.NewScheduledTransactions( log, extendedStorage.ScheduledTransactionsBootstrapper, scriptExecutor, + extendedMetrics, chainID, ) @@ -140,7 +143,7 @@ func BootstrapIndexers( extendedIndexer, err := extended.NewExtendedIndexer( log, - metrics.NewExtendedIndexingCollector(), + extendedMetrics, extendedStorage.DB, lockManager, state, diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions.go b/module/state_synchronization/indexer/extended/scheduled_transactions.go index 9ba98f56ffd..57958f22cb2 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transactions.go +++ b/module/state_synchronization/indexer/extended/scheduled_transactions.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/events" "github.com/onflow/flow-go/storage" ) @@ -48,8 +49,9 @@ const scheduledTransactionsIndexerName = "scheduled_transactions" // // Not safe for concurrent use. 
type ScheduledTransactions struct { - log zerolog.Logger - store storage.ScheduledTransactionsIndexBootstrapper + log zerolog.Logger + store storage.ScheduledTransactionsIndexBootstrapper + metrics module.ExtendedIndexingMetrics scheduledExecutorAddr flow.Address @@ -97,6 +99,7 @@ func NewScheduledTransactions( log zerolog.Logger, store storage.ScheduledTransactionsIndexBootstrapper, scriptExecutor scriptExecutor, + metrics module.ExtendedIndexingMetrics, chainID flow.ChainID, ) *ScheduledTransactions { sc := systemcontracts.SystemContractsForChain(chainID) @@ -106,6 +109,7 @@ func NewScheduledTransactions( return &ScheduledTransactions{ log: log.With().Str("component", "scheduled_tx_indexer").Logger(), store: store, + metrics: metrics, requester: NewScheduledTransactionRequester(scriptExecutor, chainID), scheduledExecutorAddr: sc.ScheduledTransactionExecutor.Address, scheduledEventType: flow.EventType(prefix + "Scheduled"), @@ -211,6 +215,14 @@ func (s *ScheduledTransactions) IndexBlockData(lctx lockctx.Proof, data BlockDat } } + s.metrics.ScheduledTransactionIndexed( + len(collected.newTxs)-len(missingIDs), + len(collected.executedEntries), + len(collected.failedEntries), + len(collected.canceledEntries), + len(missingIDs), + ) + return nil } diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go index 8efda819482..c905064f499 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transactions_test.go +++ b/module/state_synchronization/indexer/extended/scheduled_transactions_test.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" executionmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/indexes" "github.com/onflow/flow-go/storage/indexes/iterator" @@ -627,7 
+628,7 @@ func TestScheduledTransactionsIndexer_NextHeight_MockErrors(t *testing.T) { unexpectedErr := fmt.Errorf("disk I/O failure") mockStore.On("LatestIndexedHeight").Return(uint64(0), unexpectedErr) - indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, metrics.NewNoopCollector(), flow.Testnet) _, err := indexer.NextHeight() require.Error(t, err) @@ -639,7 +640,7 @@ func TestScheduledTransactionsIndexer_NextHeight_MockErrors(t *testing.T) { mockStore.On("LatestIndexedHeight").Return(uint64(0), storage.ErrNotBootstrapped) mockStore.On("UninitializedFirstHeight").Return(uint64(42), true) - indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, metrics.NewNoopCollector(), flow.Testnet) _, err := indexer.NextHeight() require.Error(t, err) @@ -655,7 +656,7 @@ func TestScheduledTransactionsIndexer_NextHeight_MockErrors(t *testing.T) { mockStore.On("Store", mock.Anything, mock.Anything, testHeight, mock.Anything).Return(storeErr) lm := storage.NewTestingLockManager() - indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, flow.Testnet) + indexer := NewScheduledTransactions(unittest.Logger(), mockStore, nil, metrics.NewNoopCollector(), flow.Testnet) header := unittest.BlockHeaderFixtureOnChain(flow.Testnet, unittest.WithHeaderHeight(testHeight)) err := unittest.WithLock(t, lm, storage.LockIndexScheduledTransactionsIndex, func(lctx lockctx.Context) error { @@ -685,7 +686,7 @@ func newScheduledTxIndexerForTest( store, err := indexes.NewScheduledTransactionsBootstrapper(db, firstHeight) require.NoError(t, err) - indexer := NewScheduledTransactions(unittest.Logger(), store, nil, chainID) + indexer := NewScheduledTransactions(unittest.Logger(), store, nil, metrics.NewNoopCollector(), chainID) return indexer, store, lm, db } @@ -784,7 +785,7 @@ func 
newScheduledTxIndexerWithScriptExecutor( store, err := indexes.NewScheduledTransactionsBootstrapper(db, firstHeight) require.NoError(t, err) - indexer := NewScheduledTransactions(unittest.Logger(), store, scriptExecutor, chainID) + indexer := NewScheduledTransactions(unittest.Logger(), store, scriptExecutor, metrics.NewNoopCollector(), chainID) return indexer, store, lm, db } From a6157db7d75e2243fa76d721fb49c87fa3974db7 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 10:23:51 -0800 Subject: [PATCH 07/18] add transfer metrics --- module/metrics.go | 6 ++ module/metrics/extended_indexing.go | 28 +++++++ module/metrics/noop.go | 4 +- module/mock/extended_indexing_metrics.go | 76 +++++++++++++++++++ .../indexer/extended/account_ft_transfers.go | 6 ++ .../extended/account_ft_transfers_test.go | 17 +++-- .../indexer/extended/account_nft_transfers.go | 6 ++ .../extended/account_nft_transfers_test.go | 11 +-- .../indexer/extended/bootstrap/bootstrap.go | 6 +- 9 files changed, 144 insertions(+), 16 deletions(-) diff --git a/module/metrics.go b/module/metrics.go index 12a2c526009..b3c0eb1d978 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -837,6 +837,12 @@ type ExtendedIndexingMetrics interface { // canceled is the number marked as canceled. // backfilled is the number fetched from state because they were unknown to the local index. ScheduledTransactionIndexed(scheduled, executed, failed, canceled, backfilled int) + + // FTTransferIndexed records the number of fungible token transfers indexed for a single block. + FTTransferIndexed(count int) + + // NFTTransferIndexed records the number of non-fungible token transfers indexed for a single block. 
+ NFTTransferIndexed(count int) } type TransactionErrorMessagesMetrics interface { diff --git a/module/metrics/extended_indexing.go b/module/metrics/extended_indexing.go index 5deb1dfab32..d94f641bd4e 100644 --- a/module/metrics/extended_indexing.go +++ b/module/metrics/extended_indexing.go @@ -13,6 +13,8 @@ type ExtendedIndexingCollector struct { indexedHeight *prometheus.GaugeVec scheduledTxCount *prometheus.CounterVec scheduledTxBackfillCount prometheus.Counter + ftTransferCount prometheus.Counter + nftTransferCount prometheus.Counter } func NewExtendedIndexingCollector() module.ExtendedIndexingMetrics { @@ -37,10 +39,26 @@ func NewExtendedIndexingCollector() module.ExtendedIndexingMetrics { Help: "total number of scheduled transactions backfilled from state", }) + ftTransferCount := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExtendedIndexing, + Name: "ft_transfers_total", + Help: "total number of fungible token transfers indexed", + }) + + nftTransferCount := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExtendedIndexing, + Name: "nft_transfers_total", + Help: "total number of non-fungible token transfers indexed", + }) + return &ExtendedIndexingCollector{ indexedHeight: indexedHeight, scheduledTxCount: scheduledTxCount, scheduledTxBackfillCount: scheduledTxBackfillCount, + ftTransferCount: ftTransferCount, + nftTransferCount: nftTransferCount, } } @@ -57,3 +75,13 @@ func (c *ExtendedIndexingCollector) ScheduledTransactionIndexed(scheduled, execu c.scheduledTxCount.WithLabelValues("canceled").Add(float64(canceled)) c.scheduledTxBackfillCount.Add(float64(backfilled)) } + +// FTTransferIndexed records the number of fungible token transfers indexed for a single block. 
+func (c *ExtendedIndexingCollector) FTTransferIndexed(count int) { + c.ftTransferCount.Add(float64(count)) +} + +// NFTTransferIndexed records the number of non-fungible token transfers indexed for a single block. +func (c *ExtendedIndexingCollector) NFTTransferIndexed(count int) { + c.nftTransferCount.Add(float64(count)) +} diff --git a/module/metrics/noop.go b/module/metrics/noop.go index cc0642bc4a1..7d06c06a82a 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -379,8 +379,10 @@ func (nc *NoopCollector) InitializeLatestHeight(height uint64) {} var _ module.ExtendedIndexingMetrics = (*NoopCollector)(nil) -func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} +func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} func (nc *NoopCollector) ScheduledTransactionIndexed(int, int, int, int, int) {} +func (nc *NoopCollector) FTTransferIndexed(int) {} +func (nc *NoopCollector) NFTTransferIndexed(int) {} var _ module.TransactionErrorMessagesMetrics = (*NoopCollector)(nil) diff --git a/module/mock/extended_indexing_metrics.go b/module/mock/extended_indexing_metrics.go index 3db38332b03..9cb9404846e 100644 --- a/module/mock/extended_indexing_metrics.go +++ b/module/mock/extended_indexing_metrics.go @@ -134,3 +134,79 @@ func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) RunAndReturn _c.Run(run) return _c } + +// FTTransferIndexed provides a mock function for the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) FTTransferIndexed(count int) { + _mock.Called(count) + return +} + +// ExtendedIndexingMetrics_FTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FTTransferIndexed' +type ExtendedIndexingMetrics_FTTransferIndexed_Call struct { + *mock.Call +} + +// FTTransferIndexed is a helper method to define mock.On call +// - count int +func (_e *ExtendedIndexingMetrics_Expecter) FTTransferIndexed(count interface{}) 
*ExtendedIndexingMetrics_FTTransferIndexed_Call { + return &ExtendedIndexingMetrics_FTTransferIndexed_Call{Call: _e.mock.On("FTTransferIndexed", count)} +} + +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_FTTransferIndexed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 int + if args[0] != nil { + arg0 = args[0].(int) + } + run(arg0) + }) + return _c +} + +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_FTTransferIndexed_Call { + _c.Call.Return() + return _c +} + +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) RunAndReturn(run func(int)) *ExtendedIndexingMetrics_FTTransferIndexed_Call { + _c.Run(run) + return _c +} + +// NFTTransferIndexed provides a mock function for the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) NFTTransferIndexed(count int) { + _mock.Called(count) + return +} + +// ExtendedIndexingMetrics_NFTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NFTTransferIndexed' +type ExtendedIndexingMetrics_NFTTransferIndexed_Call struct { + *mock.Call +} + +// NFTTransferIndexed is a helper method to define mock.On call +// - count int +func (_e *ExtendedIndexingMetrics_Expecter) NFTTransferIndexed(count interface{}) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { + return &ExtendedIndexingMetrics_NFTTransferIndexed_Call{Call: _e.mock.On("NFTTransferIndexed", count)} +} + +func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 int + if args[0] != nil { + arg0 = args[0].(int) + } + run(arg0) + }) + return _c +} + +func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_NFTTransferIndexed_Call { + _c.Call.Return() + return _c +} + +func (_c 
*ExtendedIndexingMetrics_NFTTransferIndexed_Call) RunAndReturn(run func(int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { + _c.Run(run) + return _c +} diff --git a/module/state_synchronization/indexer/extended/account_ft_transfers.go b/module/state_synchronization/indexer/extended/account_ft_transfers.go index d20ed6c377f..9ba6f873578 100644 --- a/module/state_synchronization/indexer/extended/account_ft_transfers.go +++ b/module/state_synchronization/indexer/extended/account_ft_transfers.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/transfers" "github.com/onflow/flow-go/storage" ) @@ -27,6 +28,7 @@ type FungibleTokenTransfers struct { log zerolog.Logger ftParser *transfers.FTParser ftStore storage.FungibleTokenTransfersBootstrapper + metrics module.ExtendedIndexingMetrics } var _ Indexer = (*FungibleTokenTransfers)(nil) @@ -36,11 +38,13 @@ func NewFungibleTokenTransfers( log zerolog.Logger, chainID flow.ChainID, ftStore storage.FungibleTokenTransfersBootstrapper, + metrics module.ExtendedIndexingMetrics, ) *FungibleTokenTransfers { return &FungibleTokenTransfers{ log: log.With().Str("component", "account_ft_transfers_indexer").Logger(), ftParser: transfers.NewFTParser(chainID, omitFlowFees), ftStore: ftStore, + metrics: metrics, } } @@ -86,6 +90,8 @@ func (a *FungibleTokenTransfers) IndexBlockData(lctx lockctx.Proof, data BlockDa return fmt.Errorf("failed to store fungible token transfers: %w", err) } + a.metrics.FTTransferIndexed(len(ftEntries)) + return nil } diff --git a/module/state_synchronization/indexer/extended/account_ft_transfers_test.go b/module/state_synchronization/indexer/extended/account_ft_transfers_test.go index 2c54d256303..24e9a052197 100644 --- a/module/state_synchronization/indexer/extended/account_ft_transfers_test.go +++ 
b/module/state_synchronization/indexer/extended/account_ft_transfers_test.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/transfers/testutil" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" @@ -252,7 +253,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("empty block stores empty transfer slice", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), @@ -267,7 +268,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("future height returns ErrFutureHeight", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(101)), // next expected is 100 @@ -280,7 +281,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("already indexed returns ErrAlreadyIndexed", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(99)), // next expected is 100 @@ -294,7 +295,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { 
t.Run("NextHeight error propagates", func(t *testing.T) { nextHeightErr := fmt.Errorf("next height failure") ftStore := &mockFTBootstrapper{latestHeightErr: nextHeightErr} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), @@ -309,7 +310,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("store error propagates", func(t *testing.T) { storeErr := fmt.Errorf("FT storage failure") ftStore := &mockFTBootstrapper{latestHeight: 99, storeErr: storeErr} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), @@ -325,7 +326,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { // is present, since the indexer is created with omitFlowFees=true. t.Run("flow fees transfer omitted when FeesDeducted event is present", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) payer := unittest.RandomAddressFixture() txID := unittest.IdentifierFixture() @@ -350,7 +351,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { // Tests that a self-transfer (same source and recipient address) is not stored. 
t.Run("self-transfer is not stored", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) addr := unittest.RandomAddressFixture() txID := unittest.IdentifierFixture() @@ -374,7 +375,7 @@ func TestFungibleTokenTransfers_IndexBlockData(t *testing.T) { // The parser only omits transfers that are paired with a FeesDeducted event. t.Run("transfer to flow fees address without FeesDeducted event is indexed", func(t *testing.T) { ftStore := &mockFTBootstrapper{latestHeight: 99} - a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore) + a := NewFungibleTokenTransfers(unittest.Logger(), flow.Testnet, ftStore, metrics.NewNoopCollector()) payer := unittest.RandomAddressFixture() txID := unittest.IdentifierFixture() diff --git a/module/state_synchronization/indexer/extended/account_nft_transfers.go b/module/state_synchronization/indexer/extended/account_nft_transfers.go index 4393f1f0b5a..2046f218fde 100644 --- a/module/state_synchronization/indexer/extended/account_nft_transfers.go +++ b/module/state_synchronization/indexer/extended/account_nft_transfers.go @@ -7,6 +7,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/state_synchronization/indexer/extended/transfers" "github.com/onflow/flow-go/storage" ) @@ -18,6 +19,7 @@ type NonFungibleTokenTransfers struct { log zerolog.Logger nftParser *transfers.NFTParser nftStore storage.NonFungibleTokenTransfersBootstrapper + metrics module.ExtendedIndexingMetrics } var _ Indexer = (*NonFungibleTokenTransfers)(nil) @@ -27,11 +29,13 @@ func NewNonFungibleTokenTransfers( log zerolog.Logger, chainID flow.ChainID, nftStore storage.NonFungibleTokenTransfersBootstrapper, + metrics module.ExtendedIndexingMetrics, ) 
*NonFungibleTokenTransfers { return &NonFungibleTokenTransfers{ log: log.With().Str("component", "account_nft_transfers_indexer").Logger(), nftParser: transfers.NewNFTParser(chainID), nftStore: nftStore, + metrics: metrics, } } @@ -76,5 +80,7 @@ func (a *NonFungibleTokenTransfers) IndexBlockData(lctx lockctx.Proof, data Bloc return fmt.Errorf("failed to store non-fungible token transfers: %w", err) } + a.metrics.NFTTransferIndexed(len(nftEntries)) + return nil } diff --git a/module/state_synchronization/indexer/extended/account_nft_transfers_test.go b/module/state_synchronization/indexer/extended/account_nft_transfers_test.go index 5c3be7496df..c532de2921b 100644 --- a/module/state_synchronization/indexer/extended/account_nft_transfers_test.go +++ b/module/state_synchronization/indexer/extended/account_nft_transfers_test.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" ) @@ -109,7 +110,7 @@ func TestNonFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("empty block stores empty transfer slice", func(t *testing.T) { nftStore := &mockNFTBootstrapper{latestHeight: 99} - a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore) + a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), @@ -124,7 +125,7 @@ func TestNonFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("future height returns ErrFutureHeight", func(t *testing.T) { nftStore := &mockNFTBootstrapper{latestHeight: 99} - a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore) + a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore, metrics.NewNoopCollector()) data := BlockData{ Header: 
unittest.BlockHeaderFixture(unittest.WithHeaderHeight(101)), // next expected is 100 @@ -137,7 +138,7 @@ func TestNonFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("already indexed returns ErrAlreadyIndexed", func(t *testing.T) { nftStore := &mockNFTBootstrapper{latestHeight: 99} - a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore) + a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(99)), // next expected is 100 @@ -151,7 +152,7 @@ func TestNonFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("NextHeight error propagates", func(t *testing.T) { nextHeightErr := fmt.Errorf("next height failure") nftStore := &mockNFTBootstrapper{latestHeightErr: nextHeightErr} - a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore) + a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), @@ -166,7 +167,7 @@ func TestNonFungibleTokenTransfers_IndexBlockData(t *testing.T) { t.Run("store error propagates", func(t *testing.T) { storeErr := fmt.Errorf("NFT storage failure") nftStore := &mockNFTBootstrapper{latestHeight: 99, storeErr: storeErr} - a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore) + a := NewNonFungibleTokenTransfers(unittest.Logger(), flow.Testnet, nftStore, metrics.NewNoopCollector()) data := BlockData{ Header: unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)), diff --git a/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go index 062f909b2ef..a196834cbf4 100644 --- a/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go +++ b/module/state_synchronization/indexer/extended/bootstrap/bootstrap.go @@ 
-112,20 +112,22 @@ func BootstrapIndexers( return nil, fmt.Errorf("could not create account transactions indexer: %w", err) } + extendedMetrics := metrics.NewExtendedIndexingCollector() + ftTransfers := extended.NewFungibleTokenTransfers( log, chainID, extendedStorage.FungibleTokenTransfersBootstrapper, + extendedMetrics, ) nftTransfers := extended.NewNonFungibleTokenTransfers( log, chainID, extendedStorage.NonFungibleTokenTransfersBootstrapper, + extendedMetrics, ) - extendedMetrics := metrics.NewExtendedIndexingCollector() - scheduledTransactions := extended.NewScheduledTransactions( log, extendedStorage.ScheduledTransactionsBootstrapper, From bf80533259f09981c79d8c580e02806774681a3b Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Thu, 26 Feb 2026 21:31:38 -0800 Subject: [PATCH 08/18] fix to align with upstream --- .../backend_scheduled_transactions.go | 4 ++-- .../backend_scheduled_transactions_test.go | 8 ++++---- storage/indexes/scheduled_transactions.go | 20 +++++++++++++------ 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go index a165c43744e..aefc646058a 100644 --- a/access/backends/extended/backend_scheduled_transactions.go +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -41,7 +41,7 @@ type ScheduledTransactionFilter struct { TransactionHandlerUUID *uint64 } -func (f *ScheduledTransactionFilter) IsEmpty() bool { +func (f *ScheduledTransactionFilter) isEmpty() bool { if f == nil { return true } @@ -59,7 +59,7 @@ func (f *ScheduledTransactionFilter) IsEmpty() bool { // Filter builds a [storage.IndexFilter] from the non-nil filter fields. 
func (f *ScheduledTransactionFilter) Filter() storage.IndexFilter[*accessmodel.ScheduledTransaction] { - if f.IsEmpty() { + if f.isEmpty() { return nil } diff --git a/access/backends/extended/backend_scheduled_transactions_test.go b/access/backends/extended/backend_scheduled_transactions_test.go index 040d4b6a419..4155548ba25 100644 --- a/access/backends/extended/backend_scheduled_transactions_test.go +++ b/access/backends/extended/backend_scheduled_transactions_test.go @@ -29,8 +29,8 @@ type testSchedTxEntry struct { tx accessmodel.ScheduledTransaction } -func (e testSchedTxEntry) Cursor() (accessmodel.ScheduledTransactionCursor, error) { - return accessmodel.ScheduledTransactionCursor{ID: e.tx.ID}, nil +func (e testSchedTxEntry) Cursor() accessmodel.ScheduledTransactionCursor { + return accessmodel.ScheduledTransactionCursor{ID: e.tx.ID} } func (e testSchedTxEntry) Value() (accessmodel.ScheduledTransaction, error) { @@ -39,9 +39,9 @@ func (e testSchedTxEntry) Value() (accessmodel.ScheduledTransaction, error) { // makeScheduledTxIter builds a storage.ScheduledTransactionIterator from a slice of transactions. 
func makeScheduledTxIter(txs []accessmodel.ScheduledTransaction) storage.ScheduledTransactionIterator { - return func(yield func(storage.IteratorEntry[accessmodel.ScheduledTransaction, accessmodel.ScheduledTransactionCursor]) bool) { + return func(yield func(storage.IteratorEntry[accessmodel.ScheduledTransaction, accessmodel.ScheduledTransactionCursor], error) bool) { for _, tx := range txs { - if !yield(testSchedTxEntry{tx: tx}) { + if !yield(testSchedTxEntry{tx: tx}, nil) { return } } diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go index 04480a9670f..7eef170a04f 100644 --- a/storage/indexes/scheduled_transactions.go +++ b/storage/indexes/scheduled_transactions.go @@ -17,9 +17,9 @@ import ( const ( // scheduledTxPrimaryKeyLen is [code(1)][~id(8)] = 9 bytes - scheduledTxPrimaryKeyLen = 1 + 8 + scheduledTxPrimaryKeyLen = 1 + heightLen // scheduledTxByAddrKeyLen is [code(1)][address(8)][~id(8)] = 17 bytes - scheduledTxByAddrKeyLen = 1 + flow.AddressLength + 8 + scheduledTxByAddrKeyLen = 1 + flow.AddressLength + heightLen ) // ScheduledTransactionsIndex implements [storage.ScheduledTransactionsIndex] using Pebble. @@ -152,8 +152,12 @@ func (idx *ScheduledTransactionsIndex) ByAddress( // The by-address index is key-only (nil values). The getValue closure performs // a secondary lookup into the primary index using the decoded cursor's ID. 
- getValue := func(cur access.ScheduledTransactionCursor, _ []byte, dest *access.ScheduledTransaction) error { - return operation.RetrieveByKey(reader, makeScheduledTxPrimaryKey(cur.ID), dest) + getValue := func(cur access.ScheduledTransactionCursor, _ []byte) (*access.ScheduledTransaction, error) { + var tx access.ScheduledTransaction + if err := operation.RetrieveByKey(reader, makeScheduledTxPrimaryKey(cur.ID), &tx); err != nil { + return nil, err + } + return &tx, nil } return iterator.Build(iter, decodeScheduledTxByAddrCursor, getValue), nil @@ -320,8 +324,12 @@ func (idx *ScheduledTransactionsIndex) Failed( // reconstructScheduledTx decodes a msgpack-encoded value into a [access.ScheduledTransaction]. // // Any error indicates a malformed value. -func reconstructScheduledTx(_ access.ScheduledTransactionCursor, value []byte, dest *access.ScheduledTransaction) error { - return msgpack.Unmarshal(value, dest) +func reconstructScheduledTx(_ access.ScheduledTransactionCursor, value []byte) (*access.ScheduledTransaction, error) { + var tx access.ScheduledTransaction + if err := msgpack.Unmarshal(value, &tx); err != nil { + return nil, err + } + return &tx, nil } // makeScheduledTxPrimaryKey creates a primary key [code][~id]. 
From a0e929407e114e2db08e499df86a5e0043bed138 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Fri, 27 Feb 2026 06:40:07 -0800 Subject: [PATCH 09/18] move scheduled under accounts in rest api --- .../request/get_scheduled_transactions.go | 2 +- .../experimental/routes/scheduled_transactions.go | 2 +- .../routes/scheduled_transactions_test.go | 2 +- engine/access/rest/router/routes_experimental.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- integration/go.mod | 2 +- integration/go.sum | 4 ++-- .../cohort3/extended_indexing_scheduled_txs_test.go | 12 ++++++------ 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/engine/access/rest/experimental/request/get_scheduled_transactions.go b/engine/access/rest/experimental/request/get_scheduled_transactions.go index 368c193fc07..893d6a274a6 100644 --- a/engine/access/rest/experimental/request/get_scheduled_transactions.go +++ b/engine/access/rest/experimental/request/get_scheduled_transactions.go @@ -58,7 +58,7 @@ type GetAccountScheduledTransactions struct { Address flow.Address } -// NewGetScheduledTransactionsByAddress parses GET /scheduled/account/{address}. +// NewGetScheduledTransactionsByAddress parses GET /accounts/{address}/scheduled. // // All errors indicate an invalid request. func NewGetScheduledTransactionsByAddress(r *common.Request) (GetAccountScheduledTransactions, error) { diff --git a/engine/access/rest/experimental/routes/scheduled_transactions.go b/engine/access/rest/experimental/routes/scheduled_transactions.go index 0a485239122..5ae25fe2ae9 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions.go @@ -57,7 +57,7 @@ func GetScheduledTransaction(r *common.Request, backend extended.API, link commo return m, nil } -// GetScheduledTransactionsByAddress handles GET /scheduled/account/{address}. 
+// GetScheduledTransactionsByAddress handles GET /accounts/{address}/scheduled. func GetScheduledTransactionsByAddress(r *common.Request, backend extended.API, link commonmodels.LinkGenerator) (interface{}, error) { req, err := request.NewGetScheduledTransactionsByAddress(r) if err != nil { diff --git a/engine/access/rest/experimental/routes/scheduled_transactions_test.go b/engine/access/rest/experimental/routes/scheduled_transactions_test.go index 97af86e85b1..a506a11fca0 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions_test.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions_test.go @@ -61,7 +61,7 @@ func scheduledTxByIDURL(t *testing.T, id uint64, params scheduledTxURLParams) st } func scheduledTxsByAddrURL(t *testing.T, address string, params scheduledTxURLParams) string { - u, err := url.ParseRequestURI(fmt.Sprintf("/experimental/v1/scheduled/account/%s", address)) + u, err := url.ParseRequestURI(fmt.Sprintf("/experimental/v1/accounts/%s/scheduled", address)) require.NoError(t, err) q := u.Query() if params.limit != "" { diff --git a/engine/access/rest/router/routes_experimental.go b/engine/access/rest/router/routes_experimental.go index 4ccc25fac9a..5896382c5e3 100644 --- a/engine/access/rest/router/routes_experimental.go +++ b/engine/access/rest/router/routes_experimental.go @@ -43,7 +43,7 @@ var ExperimentalRoutes = []experimentalRoute{{ Handler: routes.GetScheduledTransaction, }, { Method: http.MethodGet, - Pattern: "/scheduled/account/{address}", + Pattern: "/accounts/{address}/scheduled", Name: "getScheduledTransactionsByAddress", Handler: routes.GetScheduledTransactionsByAddress, }} diff --git a/go.mod b/go.mod index 73d8c06467b..0a1cb4311f5 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/onflow/atree v0.12.1 github.com/onflow/cadence v1.9.10 github.com/onflow/crypto v0.25.4 - github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76 + github.com/onflow/flow 
v0.4.20-0.20260227142445-6427bfb62cdc github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3 github.com/onflow/flow-core-contracts/lib/go/templates v1.9.3 github.com/onflow/flow-go-sdk v1.9.16 diff --git a/go.sum b/go.sum index 403eea33c23..3a4e68dacf7 100644 --- a/go.sum +++ b/go.sum @@ -948,8 +948,8 @@ github.com/onflow/crypto v0.25.4 h1:R615PWPdSoA5RATNb/j3cYaloBIZlSXVNgS7BjwHiwM= github.com/onflow/crypto v0.25.4/go.mod h1:DlkW/1SPUvLHYvUcjWa9PkLIRgSBKR4EDc3i+ATQKW4= github.com/onflow/fixed-point v0.1.1 h1:j0jYZVO8VGyk1476alGudEg7XqCkeTVxb5ElRJRKS90= github.com/onflow/fixed-point v0.1.1/go.mod h1:gJdoHqKtToKdOZbvryJvDZfcpzC7d2fyWuo3ZmLtcGY= -github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76 h1:d+dSM+OEP+Dq/8S8tF7TpBtsVAzmzHVA9D2OKeAF5So= -github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= +github.com/onflow/flow v0.4.20-0.20260227142445-6427bfb62cdc h1:Z3YY3bGM/GGoN8cNp8CDfzAzsRm0RTqVIpQ5tWQVHr4= +github.com/onflow/flow v0.4.20-0.20260227142445-6427bfb62cdc/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3 h1:AFl2fKKXhSW0X0KpqBMteQkIJLRjVJzIJzGbMuOGgeE= github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3/go.mod h1:hV8Pi5pGraiY8f9k0tAeuky6m+NbIMvxf7wg5QZ+e8k= github.com/onflow/flow-core-contracts/lib/go/templates v1.9.3 h1:b70XytJTPthaLcQJC3neGLZbQGBEw/SvKgYVNUv1JKM= diff --git a/integration/go.mod b/integration/go.mod index 0f383af19ba..d227368b1f8 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -22,7 +22,7 @@ require ( github.com/libp2p/go-libp2p v0.38.2 github.com/onflow/cadence v1.9.10 github.com/onflow/crypto v0.25.4 - github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76 + github.com/onflow/flow v0.4.20-0.20260227142445-6427bfb62cdc github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3 github.com/onflow/flow-core-contracts/lib/go/templates v1.9.3 
github.com/onflow/flow-ft/lib/go/contracts v1.0.1 diff --git a/integration/go.sum b/integration/go.sum index d8639c5de53..4f9437350e2 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -758,8 +758,8 @@ github.com/onflow/crypto v0.25.4 h1:R615PWPdSoA5RATNb/j3cYaloBIZlSXVNgS7BjwHiwM= github.com/onflow/crypto v0.25.4/go.mod h1:DlkW/1SPUvLHYvUcjWa9PkLIRgSBKR4EDc3i+ATQKW4= github.com/onflow/fixed-point v0.1.1 h1:j0jYZVO8VGyk1476alGudEg7XqCkeTVxb5ElRJRKS90= github.com/onflow/fixed-point v0.1.1/go.mod h1:gJdoHqKtToKdOZbvryJvDZfcpzC7d2fyWuo3ZmLtcGY= -github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76 h1:d+dSM+OEP+Dq/8S8tF7TpBtsVAzmzHVA9D2OKeAF5So= -github.com/onflow/flow v0.4.20-0.20260217184252-0c5bee538d76/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= +github.com/onflow/flow v0.4.20-0.20260227142445-6427bfb62cdc h1:Z3YY3bGM/GGoN8cNp8CDfzAzsRm0RTqVIpQ5tWQVHr4= +github.com/onflow/flow v0.4.20-0.20260227142445-6427bfb62cdc/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3 h1:AFl2fKKXhSW0X0KpqBMteQkIJLRjVJzIJzGbMuOGgeE= github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.3/go.mod h1:hV8Pi5pGraiY8f9k0tAeuky6m+NbIMvxf7wg5QZ+e8k= github.com/onflow/flow-core-contracts/lib/go/templates v1.9.3 h1:b70XytJTPthaLcQJC3neGLZbQGBEw/SvKgYVNUv1JKM= diff --git a/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go index 50f8cf200c1..8006ccdbf25 100644 --- a/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go +++ b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go @@ -88,10 +88,10 @@ func (s *ExtendedIndexingSuite) TestScheduledTransactionLifecycle() { s.True(foundID1, "tx1 (executed) should appear in /scheduled") s.True(foundID2, "tx2 (cancelled) should appear in /scheduled") - // ---- Verify /scheduled/account/{address} scopes to owner ---- + // ---- 
Verify /accounts/{address}/scheduled scopes to owner ---- ownerAddr := flow.Address(accessClient.SDKServiceAddress()).String() addrTxs := s.fetchAllScheduledTxsByAddress(ownerAddr, 20) - s.T().Logf("found %d scheduled transactions in /scheduled/account/{address}", len(addrTxs)) + s.T().Logf("found %d scheduled transactions in /accounts/{address}/scheduled", len(addrTxs)) var addrFoundID1, addrFoundID2 bool for _, tx := range addrTxs { @@ -103,8 +103,8 @@ func (s *ExtendedIndexingSuite) TestScheduledTransactionLifecycle() { addrFoundID2 = true } } - s.True(addrFoundID1, "tx1 should appear in /scheduled/account/{address}") - s.True(addrFoundID2, "tx2 should appear in /scheduled/account/{address}") + s.True(addrFoundID1, "tx1 should appear in /accounts/{address}/scheduled") + s.True(addrFoundID2, "tx2 should appear in /accounts/{address}/scheduled") // ---- Verify pagination works via /scheduled with limit=1 ---- s.verifyScheduledTxPagination() @@ -147,10 +147,10 @@ func (s *ExtendedIndexingSuite) fetchAllScheduledTxs(pageSize int) []map[string] ) } -// fetchAllScheduledTxsByAddress paginates through GET /experimental/v1/scheduled/account/{address}. +// fetchAllScheduledTxsByAddress paginates through GET /experimental/v1/accounts/{address}/scheduled. 
func (s *ExtendedIndexingSuite) fetchAllScheduledTxsByAddress(address string, pageSize int) []map[string]any { return s.collectScheduledPages( - fmt.Sprintf("%s/experimental/v1/scheduled/account/%s?limit=%d", s.restBaseURL, address, pageSize), + fmt.Sprintf("%s/experimental/v1/accounts/%s/scheduled?limit=%d", s.restBaseURL, address, pageSize), pageSize, ) } From 8ffa34d6887d66ed5bc73a83cf890644070d33f5 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Fri, 27 Feb 2026 14:34:54 -0800 Subject: [PATCH 10/18] add rangeKeys methods --- storage/indexes/scheduled_transactions.go | 46 ++++++++++++++++++----- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go index 7eef170a04f..c22107cdeab 100644 --- a/storage/indexes/scheduled_transactions.go +++ b/storage/indexes/scheduled_transactions.go @@ -110,11 +110,7 @@ func (idx *ScheduledTransactionsIndex) ByID(id uint64) (access.ScheduledTransact func (idx *ScheduledTransactionsIndex) All( cursor *access.ScheduledTransactionCursor, ) (storage.ScheduledTransactionIterator, error) { - startKey := makeScheduledTxPrimaryKey(math.MaxUint64) - if cursor != nil { - startKey = makeScheduledTxPrimaryKey(cursor.ID) - } - endKey := makeScheduledTxPrimaryKey(0) + startKey, endKey := idx.rangeKeysAll(cursor) reader := idx.db.Reader() iter, err := reader.NewIter(startKey, endKey, storage.DefaultIteratorOptions()) @@ -138,11 +134,7 @@ func (idx *ScheduledTransactionsIndex) ByAddress( account flow.Address, cursor *access.ScheduledTransactionCursor, ) (storage.ScheduledTransactionIterator, error) { - startKey := makeScheduledTxByAddrKey(account, math.MaxUint64) - if cursor != nil { - startKey = makeScheduledTxByAddrKey(account, cursor.ID) - } - endKey := makeScheduledTxByAddrKey(account, 0) + startKey, endKey := idx.rangeKeysAddress(account, cursor) reader := idx.db.Reader() iter, err := 
reader.NewIter(startKey, endKey, storage.DefaultIteratorOptions()) @@ -163,6 +155,40 @@ func (idx *ScheduledTransactionsIndex) ByAddress( return iterator.Build(iter, decodeScheduledTxByAddrCursor, getValue), nil } +// rangeKeysAll computes the start and end keys for iterating over all scheduled transactions based +// on the provided cursor. +// +// The returned range covers IDs in descending order, starting from the cursor's ID when provided. +func (idx *ScheduledTransactionsIndex) rangeKeysAll(cursor *access.ScheduledTransactionCursor) (startKey, endKey []byte) { + if cursor == nil { + // keys include the one's complement of the ID, so iteration is in descending order of ids. + startKey = makeScheduledTxPrimaryKey(math.MaxUint64) + endKey = makeScheduledTxPrimaryKey(0) + return startKey, endKey + } + + startKey = makeScheduledTxPrimaryKey(cursor.ID) + endKey = makeScheduledTxPrimaryKey(0) + return startKey, endKey +} + +// rangeKeysAddress computes the start and end keys for iterating over scheduled transactions of an +// account, based on the provided cursor. +// +// The returned range covers IDs in descending order, starting from the cursor's ID when provided. +func (idx *ScheduledTransactionsIndex) rangeKeysAddress(address flow.Address, cursor *access.ScheduledTransactionCursor) (startKey, endKey []byte) { + if cursor == nil { + // keys include the one's complement of the ID, so iteration is in descending order of ids. + startKey = makeScheduledTxByAddrKey(address, math.MaxUint64) + endKey = makeScheduledTxByAddrKey(address, 0) + return startKey, endKey + } + + startKey = makeScheduledTxByAddrKey(address, cursor.ID) + endKey = makeScheduledTxByAddrKey(address, 0) + return startKey, endKey +} + // Store indexes new scheduled transactions from the block and advances the latest indexed height. // Must be called with consecutive block heights. // The caller must hold the [storage.LockIndexScheduledTransactionsIndex] lock until committed.
From a8687d26d3fa2baea17c0e1d8e376c2320cf26d2 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Fri, 27 Feb 2026 17:22:16 -0800 Subject: [PATCH 11/18] fix error handling in rest models --- .../models/model_scheduled_transaction.go | 20 +++++----- .../models/scheduled_transaction.go | 37 +++++++++++-------- .../routes/scheduled_transactions.go | 10 ++++- .../routes/scheduled_transactions_test.go | 15 ++------ module/metrics/noop.go | 2 +- 5 files changed, 46 insertions(+), 38 deletions(-) diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction.go b/engine/access/rest/experimental/models/model_scheduled_transaction.go index 6f7d982c8ee..8d3120340c4 100644 --- a/engine/access/rest/experimental/models/model_scheduled_transaction.go +++ b/engine/access/rest/experimental/models/model_scheduled_transaction.go @@ -31,13 +31,15 @@ type ScheduledTransaction struct { // Fees returned on cancellation, as a UFix64 decimal string. FeesReturned string `json:"fees_returned,omitempty"` // Fees deducted on cancellation, as a UFix64 decimal string. 
- FeesDeducted string `json:"fees_deducted,omitempty"` - CreatedTransactionId string `json:"created_transaction_id"` - ExecutedTransactionId string `json:"executed_transaction_id,omitempty"` - CancelledTransactionId string `json:"cancelled_transaction_id,omitempty"` - Transaction *commonmodels.Transaction `json:"transaction,omitempty"` - Result *commonmodels.TransactionResult `json:"result,omitempty"` - HandlerContract *Contract `json:"handler_contract,omitempty"` - Expandable *ScheduledTransactionExpandable `json:"_expandable"` - Links *commonmodels.Links `json:"_links,omitempty"` + FeesDeducted string `json:"fees_deducted,omitempty"` + CreatedTransactionId string `json:"created_transaction_id"` + ExecutedTransactionId string `json:"executed_transaction_id,omitempty"` + CancelledTransactionId string `json:"cancelled_transaction_id,omitempty"` + // True if the scheduled transaction was created during bootstrapping based on the current chain state, not based on a protocol event. When true, block_height, transaction_id, tx_index, and event_index are absent. 
+ IsPlaceholder bool `json:"is_placeholder,omitempty"` + Transaction *commonmodels.Transaction `json:"transaction,omitempty"` + Result *commonmodels.TransactionResult `json:"result,omitempty"` + HandlerContract *Contract `json:"handler_contract,omitempty"` + Expandable *ScheduledTransactionExpandable `json:"_expandable"` + Links *commonmodels.Links `json:"_links,omitempty"` } diff --git a/engine/access/rest/experimental/models/scheduled_transaction.go b/engine/access/rest/experimental/models/scheduled_transaction.go index 68830e42c38..1f651b99c01 100644 --- a/engine/access/rest/experimental/models/scheduled_transaction.go +++ b/engine/access/rest/experimental/models/scheduled_transaction.go @@ -1,6 +1,7 @@ package models import ( + "fmt" "strconv" commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" @@ -8,18 +9,12 @@ import ( "github.com/onflow/flow-go/model/flow" ) -const ( - expandableTransaction = "transaction" - expandableResult = "result" - expandableHandlerContract = "handler_contract" -) - // Build populates a [ScheduledTransaction] from a domain model. 
func (t *ScheduledTransaction) Build( tx *accessmodel.ScheduledTransaction, link commonmodels.LinkGenerator, expand map[string]bool, -) { +) error { t.Id = strconv.FormatUint(tx.ID, 10) var priority ScheduledTransactionPriority priority.Build(tx.Priority) @@ -51,28 +46,40 @@ func (t *ScheduledTransaction) Build( t.CancelledTransactionId = tx.CancelledTransactionID.String() } + t.IsPlaceholder = tx.IsPlaceholder + t.Expandable = new(ScheduledTransactionExpandable) - if expand[expandableTransaction] && tx.Transaction != nil { + if tx.Transaction != nil { t.Transaction = new(commonmodels.Transaction) t.Transaction.Build(tx.Transaction, nil, link) - } else { - t.Expandable.Transaction = expandableTransaction + } else if tx.ExecutedTransactionID != flow.ZeroID { + transactionLink, err := link.TransactionLink(tx.ExecutedTransactionID) + if err != nil { + return fmt.Errorf("failed to generate transaction link: %w", err) + } + t.Expandable.Transaction = transactionLink } - if expand[expandableResult] && tx.Result != nil { + if tx.Result != nil { t.Result = new(commonmodels.TransactionResult) t.Result.Build(tx.Result, tx.ExecutedTransactionID, link) - } else { - t.Expandable.Result = expandableResult + } else if tx.ExecutedTransactionID != flow.ZeroID { + resultLink, err := link.TransactionResultLink(tx.ExecutedTransactionID) + if err != nil { + return fmt.Errorf("failed to generate result link: %w", err) + } + t.Expandable.Result = resultLink } - if expand[expandableHandlerContract] && tx.HandlerContract != nil { + if tx.HandlerContract != nil { t.HandlerContract = new(Contract) t.HandlerContract.Build(tx.HandlerContract) } else { - t.Expandable.HandlerContract = expandableHandlerContract + t.Expandable.HandlerContract = "handler_contract" // TODO: this will be implemented in the next PR } + + return nil } // Build sets the [ScheduledTransactionStatus] from a domain status value. 
diff --git a/engine/access/rest/experimental/routes/scheduled_transactions.go b/engine/access/rest/experimental/routes/scheduled_transactions.go index 5ae25fe2ae9..c109261f3bc 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions.go @@ -53,7 +53,10 @@ func GetScheduledTransaction(r *common.Request, backend extended.API, link commo } var m models.ScheduledTransaction - m.Build(tx, link, r.ExpandFields) + err = m.Build(tx, link, r.ExpandFields) + if err != nil { + return nil, common.NewRestError(http.StatusInternalServerError, "failed to build scheduled transaction", err) + } return m, nil } @@ -89,7 +92,10 @@ func buildScheduledTransactionsResponse( ) (models.ScheduledTransactionsResponse, error) { scheduledTransactions := make([]models.ScheduledTransaction, len(page.Transactions)) for i := range page.Transactions { - scheduledTransactions[i].Build(&page.Transactions[i], link, expandMap) + err := scheduledTransactions[i].Build(&page.Transactions[i], link, expandMap) + if err != nil { + return models.ScheduledTransactionsResponse{}, common.NewRestError(http.StatusInternalServerError, "failed to build scheduled transaction", err) + } } var nextCursor string diff --git a/engine/access/rest/experimental/routes/scheduled_transactions_test.go b/engine/access/rest/experimental/routes/scheduled_transactions_test.go index a506a11fca0..53c81c7cd03 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions_test.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions_test.go @@ -159,8 +159,6 @@ func TestGetScheduledTransactions(t *testing.T) { "transaction_handler_uuid": "7", "created_transaction_id": "%s", "_expandable": { - "transaction": "transaction", - "result": "result", "handler_contract": "handler_contract" } }, @@ -177,14 +175,14 @@ func TestGetScheduledTransactions(t *testing.T) { "created_transaction_id": "%s", "executed_transaction_id": "%s", 
"_expandable": { - "transaction": "transaction", - "result": "result", + "transaction": "/v1/transactions/%s", + "result": "/v1/transaction_results/%s", "handler_contract": "handler_contract" } } ], "next_cursor": "%s" - }`, handlerOwner.String(), tx1CreatedID.String(), handlerOwner.String(), tx2CreatedID.String(), tx2ExecutedID.String(), expectedNextCursor) + }`, handlerOwner.String(), tx1CreatedID.String(), handlerOwner.String(), tx2CreatedID.String(), tx2ExecutedID.String(), tx2ExecutedID.String(), tx2ExecutedID.String(), expectedNextCursor) assert.JSONEq(t, expected, rr.Body.String()) }) @@ -368,8 +366,6 @@ func TestGetScheduledTransaction(t *testing.T) { "transaction_handler_uuid": "3", "created_transaction_id": "%s", "_expandable": { - "transaction": "transaction", - "result": "result", "handler_contract": "handler_contract" } }`, handlerOwner.String(), txCreatedID.String()) @@ -438,10 +434,7 @@ func TestGetScheduledTransaction(t *testing.T) { "identifier": "A.0000.MyScheduler", "body": "pub contract MyScheduler {}" }, - "_expandable": { - "transaction": "transaction", - "result": "result" - } + "_expandable": {} }`, handlerOwner.String(), txCreatedID.String()) assert.JSONEq(t, expected, rr.Body.String()) diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 7d06c06a82a..b173a8411c7 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -379,7 +379,7 @@ func (nc *NoopCollector) InitializeLatestHeight(height uint64) {} var _ module.ExtendedIndexingMetrics = (*NoopCollector)(nil) -func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} +func (nc *NoopCollector) BlockIndexedExtended(string, uint64) {} func (nc *NoopCollector) ScheduledTransactionIndexed(int, int, int, int, int) {} func (nc *NoopCollector) FTTransferIndexed(int) {} func (nc *NoopCollector) NFTTransferIndexed(int) {} From a4f3cbb449170b9430e44340ad29a5ffbb085a0b Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Tue, 3 
Mar 2026 10:06:26 -0800 Subject: [PATCH 12/18] generate mocks --- module/mock/extended_indexing_metrics.go | 122 +++++++++++++---------- 1 file changed, 68 insertions(+), 54 deletions(-) diff --git a/module/mock/extended_indexing_metrics.go b/module/mock/extended_indexing_metrics.go index 9cb9404846e..c4b8324269c 100644 --- a/module/mock/extended_indexing_metrics.go +++ b/module/mock/extended_indexing_metrics.go @@ -81,132 +81,146 @@ func (_c *ExtendedIndexingMetrics_BlockIndexedExtended_Call) RunAndReturn(run fu return _c } -// ScheduledTransactionIndexed provides a mock function for the type ExtendedIndexingMetrics -func (_mock *ExtendedIndexingMetrics) ScheduledTransactionIndexed(scheduled int, executed int, failed int, canceled int, backfilled int) { - _mock.Called(scheduled, executed, failed, canceled, backfilled) +// FTTransferIndexed provides a mock function for the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) FTTransferIndexed(count int) { + _mock.Called(count) return } -// ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScheduledTransactionIndexed' -type ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call struct { +// ExtendedIndexingMetrics_FTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FTTransferIndexed' +type ExtendedIndexingMetrics_FTTransferIndexed_Call struct { *mock.Call } -// ScheduledTransactionIndexed is a helper method to define mock.On call -// - scheduled int -// - executed int -// - failed int -// - canceled int -// - backfilled int -func (_e *ExtendedIndexingMetrics_Expecter) ScheduledTransactionIndexed(scheduled interface{}, executed interface{}, failed interface{}, canceled interface{}, backfilled interface{}) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { - return &ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call{Call: 
_e.mock.On("ScheduledTransactionIndexed", scheduled, executed, failed, canceled, backfilled)} +// FTTransferIndexed is a helper method to define mock.On call +// - count int +func (_e *ExtendedIndexingMetrics_Expecter) FTTransferIndexed(count interface{}) *ExtendedIndexingMetrics_FTTransferIndexed_Call { + return &ExtendedIndexingMetrics_FTTransferIndexed_Call{Call: _e.mock.On("FTTransferIndexed", count)} } -func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Run(run func(scheduled int, executed int, failed int, canceled int, backfilled int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_FTTransferIndexed_Call { _c.Call.Run(func(args mock.Arguments) { - var arg0, arg1, arg2, arg3, arg4 int + var arg0 int if args[0] != nil { arg0 = args[0].(int) } - if args[1] != nil { - arg1 = args[1].(int) - } - if args[2] != nil { - arg2 = args[2].(int) - } - if args[3] != nil { - arg3 = args[3].(int) - } - if args[4] != nil { - arg4 = args[4].(int) - } - run(arg0, arg1, arg2, arg3, arg4) + run( + arg0, + ) }) return _c } -func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Return() *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_FTTransferIndexed_Call { _c.Call.Return() return _c } -func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) RunAndReturn(run func(int, int, int, int, int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { +func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) RunAndReturn(run func(count int)) *ExtendedIndexingMetrics_FTTransferIndexed_Call { _c.Run(run) return _c } -// FTTransferIndexed provides a mock function for the type ExtendedIndexingMetrics -func (_mock *ExtendedIndexingMetrics) FTTransferIndexed(count int) { +// NFTTransferIndexed provides a mock function for 
the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) NFTTransferIndexed(count int) { _mock.Called(count) return } -// ExtendedIndexingMetrics_FTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FTTransferIndexed' -type ExtendedIndexingMetrics_FTTransferIndexed_Call struct { +// ExtendedIndexingMetrics_NFTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NFTTransferIndexed' +type ExtendedIndexingMetrics_NFTTransferIndexed_Call struct { *mock.Call } -// FTTransferIndexed is a helper method to define mock.On call +// NFTTransferIndexed is a helper method to define mock.On call // - count int -func (_e *ExtendedIndexingMetrics_Expecter) FTTransferIndexed(count interface{}) *ExtendedIndexingMetrics_FTTransferIndexed_Call { - return &ExtendedIndexingMetrics_FTTransferIndexed_Call{Call: _e.mock.On("FTTransferIndexed", count)} +func (_e *ExtendedIndexingMetrics_Expecter) NFTTransferIndexed(count interface{}) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { + return &ExtendedIndexingMetrics_NFTTransferIndexed_Call{Call: _e.mock.On("NFTTransferIndexed", count)} } -func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_FTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 int if args[0] != nil { arg0 = args[0].(int) } - run(arg0) + run( + arg0, + ) }) return _c } -func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_FTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_NFTTransferIndexed_Call { _c.Call.Return() return _c } -func (_c *ExtendedIndexingMetrics_FTTransferIndexed_Call) RunAndReturn(run func(int)) 
*ExtendedIndexingMetrics_FTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) RunAndReturn(run func(count int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { _c.Run(run) return _c } -// NFTTransferIndexed provides a mock function for the type ExtendedIndexingMetrics -func (_mock *ExtendedIndexingMetrics) NFTTransferIndexed(count int) { - _mock.Called(count) +// ScheduledTransactionIndexed provides a mock function for the type ExtendedIndexingMetrics +func (_mock *ExtendedIndexingMetrics) ScheduledTransactionIndexed(scheduled int, executed int, failed int, canceled int, backfilled int) { + _mock.Called(scheduled, executed, failed, canceled, backfilled) return } -// ExtendedIndexingMetrics_NFTTransferIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NFTTransferIndexed' -type ExtendedIndexingMetrics_NFTTransferIndexed_Call struct { +// ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScheduledTransactionIndexed' +type ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call struct { *mock.Call } -// NFTTransferIndexed is a helper method to define mock.On call -// - count int -func (_e *ExtendedIndexingMetrics_Expecter) NFTTransferIndexed(count interface{}) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { - return &ExtendedIndexingMetrics_NFTTransferIndexed_Call{Call: _e.mock.On("NFTTransferIndexed", count)} +// ScheduledTransactionIndexed is a helper method to define mock.On call +// - scheduled int +// - executed int +// - failed int +// - canceled int +// - backfilled int +func (_e *ExtendedIndexingMetrics_Expecter) ScheduledTransactionIndexed(scheduled interface{}, executed interface{}, failed interface{}, canceled interface{}, backfilled interface{}) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { + return &ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call{Call: 
_e.mock.On("ScheduledTransactionIndexed", scheduled, executed, failed, canceled, backfilled)} } -func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Run(run func(count int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Run(run func(scheduled int, executed int, failed int, canceled int, backfilled int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { _c.Call.Run(func(args mock.Arguments) { var arg0 int if args[0] != nil { arg0 = args[0].(int) } - run(arg0) + var arg1 int + if args[1] != nil { + arg1 = args[1].(int) + } + var arg2 int + if args[2] != nil { + arg2 = args[2].(int) + } + var arg3 int + if args[3] != nil { + arg3 = args[3].(int) + } + var arg4 int + if args[4] != nil { + arg4 = args[4].(int) + } + run( + arg0, + arg1, + arg2, + arg3, + arg4, + ) }) return _c } -func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) Return() *ExtendedIndexingMetrics_NFTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) Return() *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { _c.Call.Return() return _c } -func (_c *ExtendedIndexingMetrics_NFTTransferIndexed_Call) RunAndReturn(run func(int)) *ExtendedIndexingMetrics_NFTTransferIndexed_Call { +func (_c *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call) RunAndReturn(run func(scheduled int, executed int, failed int, canceled int, backfilled int)) *ExtendedIndexingMetrics_ScheduledTransactionIndexed_Call { _c.Run(run) return _c } From 2b7e7075a35dcf2ed3ff3c5f368ff74456cff2eb Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:46:21 -0800 Subject: [PATCH 13/18] apply feedback from review --- .../backend_scheduled_transactions.go | 1 - .../models/scheduled_transaction.go | 23 ++++++++++++++----- .../request/get_scheduled_transactions.go | 2 +- .../extended_indexing_scheduled_txs_test.go | 20 ++++------------ 
module/execution/scripts.go | 2 +- .../scheduled_transaction_requester.go | 5 ++++ .../extended/scheduled_transactions.go | 10 ++++++++ storage/indexes/scheduled_transactions.go | 4 ---- 8 files changed, 39 insertions(+), 28 deletions(-) diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go index aefc646058a..f6354996948 100644 --- a/access/backends/extended/backend_scheduled_transactions.go +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -155,7 +155,6 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransaction( } // GetScheduledTransactions returns a paginated list of scheduled transactions. -// When filter.Address is set, results are scoped to that address; otherwise all are returned. // // Expected error returns during normal operations: // - [codes.FailedPrecondition]: if the index has not been initialized diff --git a/engine/access/rest/experimental/models/scheduled_transaction.go b/engine/access/rest/experimental/models/scheduled_transaction.go index 1f651b99c01..42c2ef08837 100644 --- a/engine/access/rest/experimental/models/scheduled_transaction.go +++ b/engine/access/rest/experimental/models/scheduled_transaction.go @@ -16,12 +16,19 @@ func (t *ScheduledTransaction) Build( expand map[string]bool, ) error { t.Id = strconv.FormatUint(tx.ID, 10) + var priority ScheduledTransactionPriority - priority.Build(tx.Priority) + if err := priority.Build(tx.Priority); err != nil { + return fmt.Errorf("failed to build scheduled transaction priority: %w", err) + } t.Priority = &priority + var status ScheduledTransactionStatus - status.Build(tx.Status) + if err := status.Build(tx.Status); err != nil { + return fmt.Errorf("failed to build scheduled transaction status: %w", err) + } t.Status = &status + t.Timestamp = strconv.FormatUint(tx.Timestamp, 10) t.ExecutionEffort = strconv.FormatUint(tx.ExecutionEffort, 10) t.Fees = strconv.FormatUint(tx.Fees, 10) @@ -83,7 +90,7 
@@ func (t *ScheduledTransaction) Build( } // Build sets the [ScheduledTransactionStatus] from a domain status value. -func (s *ScheduledTransactionStatus) Build(status accessmodel.ScheduledTransactionStatus) { +func (s *ScheduledTransactionStatus) Build(status accessmodel.ScheduledTransactionStatus) error { switch status { case accessmodel.ScheduledTxStatusScheduled: *s = SCHEDULED_ScheduledTransactionStatus @@ -94,19 +101,23 @@ func (s *ScheduledTransactionStatus) Build(status accessmodel.ScheduledTransacti case accessmodel.ScheduledTxStatusFailed: *s = FAILED_ScheduledTransactionStatus default: - *s = "" + return fmt.Errorf("unknown scheduled transaction status: %d", status) } + return nil } // Build sets the [ScheduledTransactionPriority] from a domain priority value. // The contract encodes priority as: 0 = high, 1 = medium, 2 = low. -func (p *ScheduledTransactionPriority) Build(priority accessmodel.ScheduledTransactionPriority) { +func (p *ScheduledTransactionPriority) Build(priority accessmodel.ScheduledTransactionPriority) error { switch priority { case accessmodel.ScheduledTxPriorityHigh: *p = HIGH_ScheduledTransactionPriority case accessmodel.ScheduledTxPriorityMedium: *p = MEDIUM_ScheduledTransactionPriority - default: + case accessmodel.ScheduledTxPriorityLow: *p = LOW_ScheduledTransactionPriority + default: + return fmt.Errorf("unknown scheduled transaction priority: %d", priority) } + return nil } diff --git a/engine/access/rest/experimental/request/get_scheduled_transactions.go b/engine/access/rest/experimental/request/get_scheduled_transactions.go index 893d6a274a6..016aededa54 100644 --- a/engine/access/rest/experimental/request/get_scheduled_transactions.go +++ b/engine/access/rest/experimental/request/get_scheduled_transactions.go @@ -107,7 +107,7 @@ func parseScheduledTxFilter(r *common.Request, filter *extended.ScheduledTransac if raw := r.GetQueryParam("status"); raw != "" { rawStatuses := strings.Split(raw, ",") for _, rawStatus := range 
rawStatuses { - s, err := accessmodel.ParseScheduledTransactionStatus(rawStatus) + s, err := accessmodel.ParseScheduledTransactionStatus(strings.TrimSpace(rawStatus)) if err != nil { return fmt.Errorf("invalid status: %w", err) } diff --git a/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go index 8006ccdbf25..8ec68cc8e57 100644 --- a/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go +++ b/integration/tests/access/cohort3/extended_indexing_scheduled_txs_test.go @@ -141,18 +141,12 @@ func (s *ExtendedIndexingSuite) verifyScheduledTxStatus(id uint64, expectedStatu // fetchAllScheduledTxs paginates through GET /experimental/v1/scheduled and returns all results. func (s *ExtendedIndexingSuite) fetchAllScheduledTxs(pageSize int) []map[string]any { - return s.collectScheduledPages( - fmt.Sprintf("%s/experimental/v1/scheduled?limit=%d", s.restBaseURL, pageSize), - pageSize, - ) + return s.collectScheduledPages(fmt.Sprintf("%s/experimental/v1/scheduled?limit=%d", s.restBaseURL, pageSize)) } // fetchAllScheduledTxsByAddress paginates through GET /experimental/v1/accounts/{address}/scheduled. func (s *ExtendedIndexingSuite) fetchAllScheduledTxsByAddress(address string, pageSize int) []map[string]any { - return s.collectScheduledPages( - fmt.Sprintf("%s/experimental/v1/accounts/%s/scheduled?limit=%d", s.restBaseURL, address, pageSize), - pageSize, - ) + return s.collectScheduledPages(fmt.Sprintf("%s/experimental/v1/accounts/%s/scheduled?limit=%d", s.restBaseURL, address, pageSize)) } // fetchScheduledTxsWithFilter fetches /experimental/v1/scheduled with the given query string filter. @@ -170,10 +164,7 @@ func (s *ExtendedIndexingSuite) fetchScheduledTxsWithFilter(filter string) []map // same total as fetching all at once. 
func (s *ExtendedIndexingSuite) verifyScheduledTxPagination() { allAtOnce := s.fetchAllScheduledTxs(100) - allPaged := s.collectScheduledPages( - fmt.Sprintf("%s/experimental/v1/scheduled?limit=1", s.restBaseURL), - 1, - ) + allPaged := s.collectScheduledPages(fmt.Sprintf("%s/experimental/v1/scheduled?limit=1", s.restBaseURL)) s.Require().Equal(len(allAtOnce), len(allPaged), "paginated results should equal unpaginated results") @@ -185,7 +176,7 @@ func (s *ExtendedIndexingSuite) verifyScheduledTxPagination() { } // collectScheduledPages follows next_cursor links to collect all transactions across all pages. -func (s *ExtendedIndexingSuite) collectScheduledPages(firstURL string, pageSize int) []map[string]any { +func (s *ExtendedIndexingSuite) collectScheduledPages(firstURL string) []map[string]any { var all []map[string]any url := firstURL for { @@ -200,8 +191,7 @@ func (s *ExtendedIndexingSuite) collectScheduledPages(firstURL string, pageSize if nextCursor == "" { break } - url = fmt.Sprintf("%s/experimental/v1/scheduled?limit=%d&cursor=%s", - s.restBaseURL, pageSize, nextCursor) + url = fmt.Sprintf("%s&cursor=%s", firstURL, nextCursor) } return all } diff --git a/module/execution/scripts.go b/module/execution/scripts.go index 43cbc2e364c..bdadaffa85c 100644 --- a/module/execution/scripts.go +++ b/module/execution/scripts.go @@ -20,8 +20,8 @@ import ( // RegisterAtHeight returns register value for provided register ID at the block height. // Even if the register wasn't indexed at the provided height, returns the highest height the register was indexed at. -// If the register with the ID was not indexed at all return nil value and no error. // Expected errors: +// - storage.ErrNotFound if block or register value at height was not found. // - storage.ErrHeightNotIndexed if the given height was not indexed yet or lower than the first indexed height. 
type RegisterAtHeight func(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) diff --git a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go index b150465e996..b6aea1c44a0 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go +++ b/module/state_synchronization/indexer/extended/scheduled_transaction_requester.go @@ -119,6 +119,11 @@ func (r *ScheduledTransactionRequester) fetchMissingTxs( return nil, fmt.Errorf("expected Array result, got %T", results) } + if len(array.Values) != len(batch) { + return nil, fmt.Errorf("expected %d results from scheduled transaction data script, got %d", + len(batch), len(array.Values)) + } + for i, result := range array.Values { opt, ok := result.(cadence.Optional) if !ok { diff --git a/module/state_synchronization/indexer/extended/scheduled_transactions.go b/module/state_synchronization/indexer/extended/scheduled_transactions.go index 57958f22cb2..a903d66af4a 100644 --- a/module/state_synchronization/indexer/extended/scheduled_transactions.go +++ b/module/state_synchronization/indexer/extended/scheduled_transactions.go @@ -207,6 +207,11 @@ func (s *ScheduledTransactions) IndexBlockData(lctx lockctx.Proof, data BlockDat newTxs = append(newTxs, missingTxs...) } + // at this point, all missing scheduled transactions should be present in the newTxs slice. + if len(newTxs) < len(missingIDs) { + return fmt.Errorf("missing backfilled scheduled transactions: expected %d, got %d", len(missingIDs), len(newTxs)) + } + // finally store all new transactions in a single call to Store since store may only be called // once per block. 
if err := s.store.Store(lctx, rw, data.Header.Height, newTxs); err != nil { @@ -343,6 +348,11 @@ func (s *ScheduledTransactions) collectScheduledTransactionData(data BlockData) // Any remaining pendingIDs were scheduled for execution but not executed — they failed. if len(pendingIDs) > 0 { + if pendingEventTxIndex == nil { + // this shouldn't be possible and indicates a bug in the indexer + return nil, fmt.Errorf("found pending scheduled transactions, but no PendingExecution event found in block %d", data.Header.Height) + } + // find the transaction that attempted to execute the scheduled transactions, and mark it as failed. // start searching from the system transaction that adds the scheduled transactions into the // system collection to reduce overhead. diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go index c22107cdeab..1dd48050d63 100644 --- a/storage/indexes/scheduled_transactions.go +++ b/storage/indexes/scheduled_transactions.go @@ -157,8 +157,6 @@ func (idx *ScheduledTransactionsIndex) ByAddress( // rangeKeysAll computes the start and end keys for iterating over all scheduled transactions based // on the provided cursor. -// -// Any error indicates the cursor is invalid func (idx *ScheduledTransactionsIndex) rangeKeysAll(cursor *access.ScheduledTransactionCursor) (startKey, endKey []byte) { if cursor == nil { // keys include the one's complement of the ID, so iteration is in descending order of ids. @@ -174,8 +172,6 @@ func (idx *ScheduledTransactionsIndex) rangeKeysAll(cursor *access.ScheduledTran // rangeKeysAddress computes the start and end keys for iterating over scheduled transactions of an // account, based on the provided cursor. 
-// -// Any error indicates the cursor is invalid func (idx *ScheduledTransactionsIndex) rangeKeysAddress(address flow.Address, cursor *access.ScheduledTransactionCursor) (startKey, endKey []byte) { if cursor == nil { // keys include the one's complement of the ID, so iteration is in descending order of ids. From 999525bdcdc03e5a1deb973b5eb20339abf36ab3 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:29:34 -0800 Subject: [PATCH 14/18] remove duplicate error mapping --- access/backends/extended/backend_base.go | 20 ------------------- .../backend_scheduled_transactions.go | 6 +++--- 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/access/backends/extended/backend_base.go b/access/backends/extended/backend_base.go index c4ae5519fff..58032fadf3f 100644 --- a/access/backends/extended/backend_base.go +++ b/access/backends/extended/backend_base.go @@ -6,14 +6,11 @@ import ( "fmt" "github.com/onflow/flow/protobuf/go/flow/entities" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/access/systemcollection" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) @@ -31,23 +28,6 @@ type backendBase struct { systemCollections *systemcollection.Versioned } -// mapReadError converts storage read errors to appropriate gRPC status errors. 
-func (b *backendBase) mapReadError(ctx context.Context, label string, err error) error { - switch { - case errors.Is(err, storage.ErrNotBootstrapped): - return status.Errorf(codes.FailedPrecondition, "%s index not initialized: %v", label, err) - case errors.Is(err, storage.ErrHeightNotIndexed): - return status.Errorf(codes.OutOfRange, "requested height not indexed: %v", err) - case errors.Is(err, storage.ErrInvalidQuery): - return status.Errorf(codes.InvalidArgument, "invalid query: %v", err) - case errors.Is(err, storage.ErrNotFound): - return status.Errorf(codes.NotFound, "not found: %v", err) - default: - irrecoverable.Throw(ctx, fmt.Errorf("failed to get %s: %w", label, err)) - return err - } -} - // normalizeLimit applies default page size when limit is 0, and returns an error if the limit // exceeds the configured maximum. // diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go index f6354996948..6b274246d38 100644 --- a/access/backends/extended/backend_scheduled_transactions.go +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -138,7 +138,7 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransaction( ) (*accessmodel.ScheduledTransaction, error) { tx, err := b.store.ByID(id) if err != nil { - return nil, b.mapReadError(ctx, "scheduled transaction", err) + return nil, mapReadError(ctx, "scheduled transaction", err) } if !expandOptions.HasExpand() { @@ -174,7 +174,7 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransactions( iter, err := b.store.All(cursor) if err != nil { - return nil, b.mapReadError(ctx, "scheduled transactions", err) + return nil, mapReadError(ctx, "scheduled transactions", err) } collected, nextCursor, err := iterator.CollectResults(iter, limit, filter.Filter()) @@ -225,7 +225,7 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransactionsByAddress( iter, err := b.store.ByAddress(address, cursor) if err != nil { - return 
nil, b.mapReadError(ctx, "scheduled transactions", err) + return nil, mapReadError(ctx, "scheduled transactions", err) } collected, nextCursor, err := iterator.CollectResults(iter, limit, filter.Filter()) From 85cf6c9f0d8b3dcb3ee7f917c0d296dbb558b9a2 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:41:11 -0800 Subject: [PATCH 15/18] add created_at and completed_at timestamps to response --- access/backends/extended/backend.go | 1 + access/backends/extended/backend_base.go | 1 + .../backend_scheduled_transactions.go | 200 ++++++-- .../backend_scheduled_transactions_test.go | 485 ++++++++++++++++-- .../models/model_scheduled_transaction.go | 4 + .../models/scheduled_transaction.go | 8 + .../routes/scheduled_transactions_test.go | 17 +- model/access/scheduled_transaction.go | 4 + storage/indexes/scheduled_transactions.go | 6 + utils/visited/visited.go | 44 ++ utils/visited/visited_test.go | 86 ++++ 11 files changed, 762 insertions(+), 94 deletions(-) create mode 100644 utils/visited/visited.go create mode 100644 utils/visited/visited_test.go diff --git a/access/backends/extended/backend.go b/access/backends/extended/backend.go index 0a1b539176b..d99f95ca83a 100644 --- a/access/backends/extended/backend.go +++ b/access/backends/extended/backend.go @@ -89,6 +89,7 @@ func New( base := &backendBase{ config: config, headers: headers, + blocks: blocks, collections: collections, transactions: transactions, scheduledTransactions: scheduledTransactions, diff --git a/access/backends/extended/backend_base.go b/access/backends/extended/backend_base.go index 58032fadf3f..a63e43aff4b 100644 --- a/access/backends/extended/backend_base.go +++ b/access/backends/extended/backend_base.go @@ -20,6 +20,7 @@ type backendBase struct { config Config headers storage.Headers + blocks storage.Blocks collections storage.CollectionsReader transactions storage.TransactionsReader scheduledTransactions 
storage.ScheduledTransactionsReader diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go index 6b274246d38..6f6ec6a47f4 100644 --- a/access/backends/extended/backend_scheduled_transactions.go +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -2,6 +2,7 @@ package extended import ( "context" + "errors" "fmt" "strings" @@ -99,11 +100,11 @@ func (f *ScheduledTransactionFilter) Filter() storage.IndexFilter[*accessmodel.S type ScheduledTransactionsBackend struct { *backendBase - log zerolog.Logger - store storage.ScheduledTransactionsIndexReader - scheduledTxLookup storage.ScheduledTransactionsReader - state protocol.State - scriptExecutor execution.ScriptExecutor + log zerolog.Logger + store storage.ScheduledTransactionsIndexReader + scheduledTransactions storage.ScheduledTransactionsReader + state protocol.State + scriptExecutor execution.ScriptExecutor } // NewScheduledTransactionsBackend creates a new [ScheduledTransactionsBackend]. 
@@ -111,17 +112,17 @@ func NewScheduledTransactionsBackend( log zerolog.Logger, base *backendBase, store storage.ScheduledTransactionsIndexReader, - scheduledTxLookup storage.ScheduledTransactionsReader, + scheduledTransactions storage.ScheduledTransactionsReader, state protocol.State, scriptExecutor execution.ScriptExecutor, ) *ScheduledTransactionsBackend { return &ScheduledTransactionsBackend{ - backendBase: base, - log: log, - store: store, - scheduledTxLookup: scheduledTxLookup, - state: state, - scriptExecutor: scriptExecutor, + backendBase: base, + log: log, + store: store, + scheduledTransactions: scheduledTransactions, + state: state, + scriptExecutor: scriptExecutor, } } @@ -141,10 +142,6 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransaction( return nil, mapReadError(ctx, "scheduled transaction", err) } - if !expandOptions.HasExpand() { - return &tx, nil - } - if err := b.expand(ctx, &tx, expandOptions, encodingVersion); err != nil { err = fmt.Errorf("failed to expand scheduled transaction %d: %w", tx.ID, err) irrecoverable.Throw(ctx, err) @@ -189,13 +186,10 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransactions( NextCursor: nextCursor, } - if !expandOptions.HasExpand() { - return page, nil - } - for i := range page.Transactions { - if err := b.expand(ctx, &page.Transactions[i], expandOptions, encodingVersion); err != nil { - err = fmt.Errorf("failed to expand scheduled transaction %d: %w", page.Transactions[i].ID, err) + tx := &page.Transactions[i] + if err := b.expand(ctx, tx, expandOptions, encodingVersion); err != nil { + err = fmt.Errorf("failed to expand scheduled transaction %d: %w", tx.ID, err) irrecoverable.Throw(ctx, err) return nil, err } @@ -240,13 +234,10 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransactionsByAddress( NextCursor: nextCursor, } - if !expandOptions.HasExpand() { - return page, nil - } - for i := range page.Transactions { - if err := b.expand(ctx, &page.Transactions[i], expandOptions, 
encodingVersion); err != nil { - err = fmt.Errorf("failed to expand scheduled transaction %d: %w", page.Transactions[i].ID, err) + tx := &page.Transactions[i] + if err := b.expand(ctx, tx, expandOptions, encodingVersion); err != nil { + err = fmt.Errorf("failed to expand scheduled transaction %d: %w", tx.ID, err) irrecoverable.Throw(ctx, err) return nil, err } @@ -255,6 +246,127 @@ func (b *ScheduledTransactionsBackend) GetScheduledTransactionsByAddress( return page, nil } +// populateBlockTimestamps looks up the block headers for the creation and completion +// transactions and sets CreatedAt and CompletedAt on the transaction. +// +// No error returns are expected during normal operation. +func (b *ScheduledTransactionsBackend) populateBlockTimestamps( + tx *accessmodel.ScheduledTransaction, +) (executedHeader *flow.Header, err error) { + // `CreatedTransactionID` may be empty if this scheduled transaction was backfilled + if tx.CreatedTransactionID != flow.ZeroID { + header, err := b.lookupAnyTransactionBlock(tx.CreatedTransactionID) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("failed to get creation block timestamp for scheduled tx %d: %w", tx.ID, err) + } + if err == nil { + tx.CreatedAt = header.Timestamp + } + // if the created transaction was not found, don't populate the timestamp, but continue to + // return a response. the created transaction ID will be populated, so it will be clear this + // information is not available yet. + } + + switch tx.Status { + case accessmodel.ScheduledTxStatusExecuted, accessmodel.ScheduledTxStatusFailed: + header, err := b.lookupScheduledTransactionBlock(tx.ExecutedTransactionID) + if err != nil { + // if the scheduled transaction record was found in the extended index, then the scheduled + // transaction to block ID mapping must exist in storage. 
+ err = irrecoverable.NewException(fmt.Errorf("failed to get completion block timestamp for scheduled tx %d: %w", tx.ID, err)) + return nil, err + } + // Note: the executed transaction header must be found, so the method can guarantee a header + // is returned for all executed scheduled transactions if no error is encountered. + tx.CompletedAt = header.Timestamp + return header, nil + + case accessmodel.ScheduledTxStatusCancelled: + header, err := b.lookupAnyTransactionBlock(tx.CancelledTransactionID) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("failed to get creation block timestamp for scheduled tx %d: %w", tx.ID, err) + } + if err == nil { + tx.CompletedAt = header.Timestamp + } + return nil, nil + // if the cancelled transaction was not found, don't populate the timestamp, but continue to + // return a response. the cancelled transaction ID will be populated, so it will be clear this + // information is not available yet. + } + + return nil, nil +} + +// lookupAnyTransactionBlock looks up the block timestamp for a transaction by its ID. +// It supports both scheduled and standard transactions. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if the transaction's block could not be resolved. +func (b *ScheduledTransactionsBackend) lookupAnyTransactionBlock(txID flow.Identifier) (*flow.Header, error) { + header, err := b.lookupScheduledTransactionBlock(txID) + if err == nil { + return header, nil + } + if !errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("failed to get block timestamp for scheduled tx %s: %w", txID, err) + } + // the transaction may not be a scheduled transaction, so try to look up the block for a + // standard transaction. 
+ + header, err = b.lookupStandardTransactionBlock(txID) + if err != nil { + return nil, fmt.Errorf("failed to get block timestamp for standard tx %s: %w", txID, err) + } + + return header, nil +} + +// lookupStandardTransactionBlock looks up the block timestamp for a standard transaction by its ID. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if the transaction's block could not be resolved. +func (b *ScheduledTransactionsBackend) lookupStandardTransactionBlock(txID flow.Identifier) (*flow.Header, error) { + collection, err := b.collections.LightByTransactionID(txID) + if err != nil { + return nil, fmt.Errorf("failed to get collection for tx: %w", err) + } + collectionID := collection.ID() + + block, err := b.blocks.ByCollectionID(collectionID) + if err != nil { + // The txID → collectionID index (LightByTransactionID) and the collectionID → blockID index + // (checked here) are built by separate async components: the collection Indexer and + // the FinalizedBlockProcessor respectively. During catch-up or under load, the + // FinalizedBlockProcessor may lag behind, causing ErrNotFound here even though the + // collection is indexed. This is a transient state that resolves once finalization + // processing catches up. + // + // Note: this will also fail if the transaction is a system transaction. + return nil, fmt.Errorf("failed to get block ID for collection %s: %w", collectionID, err) + } + return block.ToHeader(), nil +} + +// lookupScheduledTransactionBlock looks up the block timestamp for a scheduled transaction by its ID. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound]: if the scheduled transaction did not exist in storage. 
+func (b *ScheduledTransactionsBackend) lookupScheduledTransactionBlock(txID flow.Identifier) (*flow.Header, error) { + blockID, err := b.scheduledTransactions.BlockIDByTransactionID(txID) + if err != nil { + return nil, fmt.Errorf("failed to get block ID for scheduled tx: %w", err) + } + + header, err := b.headers.ByBlockID(blockID) + if err != nil { + // if the scheduled transaction was found, the block must exist in storage. + err = irrecoverable.NewException(fmt.Errorf("failed to get header for block %s: %w", blockID, err)) + return nil, err + } + return header, nil +} + // expand enriches an executed scheduled transaction with its transaction result. // For non-executed transactions, this is a no-op. // @@ -265,6 +377,12 @@ func (b *ScheduledTransactionsBackend) expand( expandOptions ScheduledTransactionExpandOptions, encodingVersion entities.EventEncodingVersion, ) error { + // always populate the block timestamps + executedHeader, err := b.populateBlockTimestamps(tx) + if err != nil { + return fmt.Errorf("failed to get block timestamp for scheduled tx %d: %w", tx.ID, err) + } + if expandOptions.HandlerContract { err := b.expandHandlerContract(ctx, tx) if err != nil { @@ -281,43 +399,27 @@ func (b *ScheduledTransactionsBackend) expand( return nil } - txID, err := b.scheduledTxLookup.TransactionIDByID(tx.ID) - if err != nil { - // the transaction is marked as executed, so it must exist in storage. 
- return fmt.Errorf("failed to lookup transaction ID for scheduled tx %d: %w", tx.ID, err) - } - - blockID, err := b.scheduledTxLookup.BlockIDByTransactionID(txID) - if err != nil { - return fmt.Errorf("failed to lookup block ID for tx %s: %w", txID, err) - } - - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return fmt.Errorf("failed to get header for block %s: %w", blockID, err) - } - if expandOptions.Transaction { - allScheduledTxs, err := b.transactionsProvider.ScheduledTransactionsByBlockID(ctx, header) + allScheduledTxs, err := b.transactionsProvider.ScheduledTransactionsByBlockID(ctx, executedHeader) if err != nil { return fmt.Errorf("could not retrieve all scheduled transactions: %w", err) } for _, scheduledTx := range allScheduledTxs { - if scheduledTx.ID() == txID { + if scheduledTx.ID() == tx.ExecutedTransactionID { tx.Transaction = scheduledTx break } } if tx.Transaction == nil { - return fmt.Errorf("scheduled transaction %s not found in block %s", txID, blockID) + return fmt.Errorf("scheduled transaction %s not found in block %s", tx.ExecutedTransactionID, executedHeader.ID()) } } if expandOptions.Result { - result, err := b.getTransactionResult(ctx, txID, header, true, expandOptions.Transaction, encodingVersion) + result, err := b.getTransactionResult(ctx, tx.ExecutedTransactionID, executedHeader, true, expandOptions.Transaction, encodingVersion) if err != nil { - return fmt.Errorf("failed to get transaction result for tx %s: %w", txID, err) + return fmt.Errorf("failed to get transaction result for tx %s: %w", tx.ExecutedTransactionID, err) } tx.Result = result } diff --git a/access/backends/extended/backend_scheduled_transactions_test.go b/access/backends/extended/backend_scheduled_transactions_test.go index 4155548ba25..fefd54af28d 100644 --- a/access/backends/extended/backend_scheduled_transactions_test.go +++ b/access/backends/extended/backend_scheduled_transactions_test.go @@ -356,29 +356,70 @@ func 
TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { require.Error(t, err) }) - // expand is no-op for scheduled and cancelled statuses - for _, status := range []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusScheduled, accessmodel.ScheduledTxStatusCancelled} { - t.Run(fmt.Sprintf("expand is no-op for %s status", status), func(t *testing.T) { - store := storagemock.NewScheduledTransactionsIndexReader(t) - backend := NewScheduledTransactionsBackend( - unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, - ) + // expand is no-op for scheduled status (no block lookups, no result/transaction populated) + t.Run("expand is no-op for scheduled status", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + backend := NewScheduledTransactionsBackend( + unittest.Logger(), &backendBase{config: defaultConfig}, + store, nil, nil, nil, + ) - tx := accessmodel.ScheduledTransaction{ID: 1, Status: status} - store.On("ByID", uint64(1)).Return(tx, nil).Once() + tx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusScheduled} + store.On("ByID", uint64(1)).Return(tx, nil).Once() - // expand options set but status is Scheduled: no storage lookups expected - result, err := backend.GetScheduledTransaction( - context.Background(), 1, - ScheduledTransactionExpandOptions{Result: true, Transaction: true}, - defaultEncoding, - ) - require.NoError(t, err) - assert.Nil(t, result.Transaction) - assert.Nil(t, result.Result) - }) - } + // expand options set but status is Scheduled: no storage lookups expected + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{Result: true, Transaction: true}, + defaultEncoding, + ) + require.NoError(t, err) + assert.Nil(t, result.Transaction) + assert.Nil(t, result.Result) + }) + + // for cancelled status: block timestamps are looked up, but result/transaction are not expanded 
+ t.Run("expand is no-op for result/transaction on cancelled status", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + + cancelledTxID := unittest.IdentifierFixture() + cancelledCollection := &flow.LightCollection{Transactions: []flow.Identifier{cancelledTxID}} + cancelledBlock := unittest.BlockFixture() + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + collections: mockCollections, + blocks: mockBlocks, + }, + store, scheduledTxLookup, nil, nil, + ) + + tx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusCancelled, + CancelledTransactionID: cancelledTxID, + } + store.On("ByID", uint64(1)).Return(tx, nil).Once() + // CancelledTransactionID is a user tx: not in scheduled lookup, falls back to collection + scheduledTxLookup.On("BlockIDByTransactionID", cancelledTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", cancelledTxID).Return(cancelledCollection, nil).Once() + mockBlocks.On("ByCollectionID", cancelledCollection.ID()).Return(cancelledBlock, nil).Once() + + result, err := backend.GetScheduledTransaction( + context.Background(), 1, + ScheduledTransactionExpandOptions{Result: true, Transaction: true}, + defaultEncoding, + ) + require.NoError(t, err) + assert.Equal(t, cancelledBlock.Timestamp, result.CompletedAt) + assert.Nil(t, result.Transaction) + assert.Nil(t, result.Result) + }) // expand result works for executed and failed transactions for _, status := range []accessmodel.ScheduledTransactionStatus{accessmodel.ScheduledTxStatusExecuted, accessmodel.ScheduledTxStatusFailed} { @@ -402,7 +443,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { blockHeader := 
unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status, ExecutedTransactionID: txID} expectedResult := &accessmodel.TransactionResult{ TransactionID: txID, BlockID: blockID, @@ -410,7 +451,6 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { } store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() mockProvider.On("TransactionResult", mocktestify.Anything, blockHeader, txID, mocktestify.Anything, defaultEncoding). @@ -451,10 +491,9 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status, ExecutedTransactionID: txID} store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
@@ -515,7 +554,76 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { assert.Equal(t, string(contractBody), result.HandlerContract.Body) }) - t.Run("TransactionIDByID error during expand triggers irrecoverable", func(t *testing.T) { + t.Run("created tx block not yet available returns zero CreatedAt", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + collections: mockCollections, + blocks: mockBlocks, + }, + store, scheduledTxLookup, nil, nil, + ) + + createdTxID := unittest.IdentifierFixture() + createdCollection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: createdTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + // block not yet indexed by FinalizedBlockProcessor + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return((*flow.Block)(nil), storage.ErrNotFound).Once() + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Zero(t, result.CreatedAt) + }) + + t.Run("cancelled tx block not yet available returns zero CompletedAt", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) 
+ mockBlocks := storagemock.NewBlocks(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + collections: mockCollections, + blocks: mockBlocks, + }, + store, scheduledTxLookup, nil, nil, + ) + + cancelledTxID := unittest.IdentifierFixture() + cancelledCollection := &flow.LightCollection{Transactions: []flow.Identifier{cancelledTxID}} + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusCancelled, + CancelledTransactionID: cancelledTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", cancelledTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", cancelledTxID).Return(cancelledCollection, nil).Once() + mockBlocks.On("ByCollectionID", cancelledCollection.ID()).Return((*flow.Block)(nil), storage.ErrNotFound).Once() + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Zero(t, result.CompletedAt) + }) + + t.Run("BlockIDByTransactionID error during populateBlockTimestamps triggers irrecoverable", func(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) @@ -524,11 +632,12 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store, scheduledTxLookup, nil, nil, ) - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + txID := unittest.IdentifierFixture() + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} lookupErr := fmt.Errorf("lookup error") store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + 
scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(flow.Identifier{}, lookupErr).Once() signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) @@ -549,11 +658,10 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { ) txID := unittest.IdentifierFixture() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} blockLookupErr := fmt.Errorf("block lookup error") store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(flow.Identifier{}, blockLookupErr).Once() signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) @@ -577,11 +685,10 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { txID := unittest.IdentifierFixture() blockID := unittest.IdentifierFixture() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} headerErr := fmt.Errorf("header lookup error") store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return((*flow.Header)(nil), headerErr).Once() @@ -613,11 +720,10 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { txID := unittest.IdentifierFixture() blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + storedTx := accessmodel.ScheduledTransaction{ID: 1, 
Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} providerErr := fmt.Errorf("provider error") store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). @@ -652,12 +758,11 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { txID := unittest.IdentifierFixture() blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} // otherTxBody.ID() != txID otherTxBody := unittest.TransactionBodyFixture() store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
@@ -691,11 +796,10 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { txID := unittest.IdentifierFixture() blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted} + storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} resultErr := fmt.Errorf("result lookup error") store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(txID, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() mockProvider.On("TransactionResult", mocktestify.Anything, blockHeader, txID, mocktestify.Anything, defaultEncoding). @@ -839,7 +943,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { txs := []accessmodel.ScheduledTransaction{ {ID: 5, Status: accessmodel.ScheduledTxStatusScheduled}, - {ID: 3, Status: accessmodel.ScheduledTxStatusExecuted}, + {ID: 3, Status: accessmodel.ScheduledTxStatusScheduled}, } store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
@@ -865,7 +969,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { // limit=2, provide 3 items: CollectResults collects 2, then peeks at item 3 to build cursor txs := []accessmodel.ScheduledTransaction{ {ID: 5, Status: accessmodel.ScheduledTxStatusScheduled}, - {ID: 3, Status: accessmodel.ScheduledTxStatusExecuted}, + {ID: 3, Status: accessmodel.ScheduledTxStatusScheduled}, {ID: 1, Status: accessmodel.ScheduledTxStatusScheduled}, } @@ -1029,14 +1133,15 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store, scheduledTxLookup, nil, nil, ) + txID := unittest.IdentifierFixture() txs := []accessmodel.ScheduledTransaction{ - {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted}, + {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID}, } lookupErr := fmt.Errorf("lookup failed") store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). Return(makeScheduledTxIter(txs), nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(flow.Identifier{}, lookupErr).Once() signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) @@ -1048,6 +1153,44 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { require.Error(t, err) verifyThrown() }) + + t.Run("tx block not yet available returns zero timestamps", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + collections: mockCollections, + blocks: mockBlocks, + }, + store, scheduledTxLookup, nil, nil, + ) + + createdTxID := unittest.IdentifierFixture() + createdCollection := 
&flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + txs := []accessmodel.ScheduledTransaction{ + {ID: 1, Status: accessmodel.ScheduledTxStatusScheduled, CreatedTransactionID: createdTxID}, + } + + store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). + Return(makeScheduledTxIter(txs), nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return((*flow.Block)(nil), storage.ErrNotFound).Once() + + page, err := backend.GetScheduledTransactions( + context.Background(), 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + require.Len(t, page.Transactions, 1) + assert.Zero(t, page.Transactions[0].CreatedAt) + }) } // TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress tests all code paths for the @@ -1220,14 +1363,15 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi ) addr := unittest.RandomAddressFixture() + txID := unittest.IdentifierFixture() txs := []accessmodel.ScheduledTransaction{ - {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted}, + {ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID}, } lookupErr := fmt.Errorf("lookup failed") store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). 
Return(makeScheduledTxIter(txs), nil).Once() - scheduledTxLookup.On("TransactionIDByID", uint64(1)).Return(flow.Identifier{}, lookupErr).Once() + scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(flow.Identifier{}, lookupErr).Once() signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) @@ -1239,4 +1383,257 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi require.Error(t, err) verifyThrown() }) + + t.Run("tx block not yet available returns zero timestamps", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + + backend := NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + collections: mockCollections, + blocks: mockBlocks, + }, + store, scheduledTxLookup, nil, nil, + ) + + addr := unittest.RandomAddressFixture() + createdTxID := unittest.IdentifierFixture() + createdCollection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + txs := []accessmodel.ScheduledTransaction{ + {ID: 1, Status: accessmodel.ScheduledTxStatusScheduled, CreatedTransactionID: createdTxID}, + } + + store.On("ByAddress", addr, (*accessmodel.ScheduledTransactionCursor)(nil)). 
+ Return(makeScheduledTxIter(txs), nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return((*flow.Block)(nil), storage.ErrNotFound).Once() + + page, err := backend.GetScheduledTransactionsByAddress( + context.Background(), addr, 0, nil, + ScheduledTransactionFilter{}, ScheduledTransactionExpandOptions{}, + defaultEncoding, + ) + require.NoError(t, err) + require.Len(t, page.Transactions, 1) + assert.Zero(t, page.Transactions[0].CreatedAt) + }) +} + +// TestScheduledTransactionsBackend_PopulateBlockTimestamps verifies that populateBlockTimestamps +// correctly resolves CreatedAt and CompletedAt from the stored transaction IDs. +func TestScheduledTransactionsBackend_PopulateBlockTimestamps(t *testing.T) { + t.Parallel() + + defaultConfig := DefaultConfig() + defaultEncoding := entities.EventEncodingVersion_JSON_CDC_V0 + + // helper builds a full backend with all mocks configured + makeBackend := func( + store *storagemock.ScheduledTransactionsIndexReader, + scheduledTxLookup *storagemock.ScheduledTransactionsReader, + mockHeaders *storagemock.Headers, + mockCollections *storagemock.CollectionsReader, + mockBlocks *storagemock.Blocks, + ) *ScheduledTransactionsBackend { + return NewScheduledTransactionsBackend( + unittest.Logger(), + &backendBase{ + config: defaultConfig, + headers: mockHeaders, + blocks: mockBlocks, + collections: mockCollections, + }, + store, scheduledTxLookup, nil, nil, + ) + } + + t.Run("created_at populated for non-placeholder tx with CreatedTransactionID", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + backend := 
makeBackend(store, scheduledTxLookup, nil, mockCollections, mockBlocks) + + createdTxID := unittest.IdentifierFixture() + collection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + block := unittest.BlockFixture() + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: createdTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // createdTxID is a user transaction: not in scheduled tx lookup, falls back to collection + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(collection, nil).Once() + mockBlocks.On("ByCollectionID", collection.ID()).Return(block, nil).Once() + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Equal(t, block.Timestamp, result.CreatedAt) + assert.Zero(t, result.CompletedAt) + }) + + t.Run("created_at absent for placeholder tx", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + backend := makeBackend(store, scheduledTxLookup, nil, nil, nil) + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + IsPlaceholder: true, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // no lookups expected: placeholder tx skips CreatedAt, Scheduled status skips CompletedAt + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Zero(t, result.CreatedAt) + assert.Zero(t, result.CompletedAt) + }) + + t.Run("completed_at populated for executed tx via scheduledTxLookup", func(t *testing.T) { + store := 
storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockHeaders := storagemock.NewHeaders(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + backend := makeBackend(store, scheduledTxLookup, mockHeaders, mockCollections, mockBlocks) + + createdTxID := unittest.IdentifierFixture() + executedTxID := unittest.IdentifierFixture() + createdCollection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + createdBlock := unittest.BlockFixture() + executedBlockHeader := unittest.BlockHeaderFixture() + executedBlockID := executedBlockHeader.ID() + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusExecuted, + CreatedTransactionID: createdTxID, + ExecutedTransactionID: executedTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // createdTxID is a user transaction: scheduledTxLookup returns ErrNotFound, falls back to collection + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return(createdBlock, nil).Once() + // executedTxID is a system transaction: scheduledTxLookup succeeds + scheduledTxLookup.On("BlockIDByTransactionID", executedTxID).Return(executedBlockID, nil).Once() + mockHeaders.On("ByBlockID", executedBlockID).Return(executedBlockHeader, nil).Once() + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Equal(t, createdBlock.Timestamp, result.CreatedAt) + assert.Equal(t, executedBlockHeader.Timestamp, result.CompletedAt) + }) + + t.Run("completed_at populated for cancelled tx via collection lookup", func(t *testing.T) { + store := 
storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + backend := makeBackend(store, scheduledTxLookup, nil, mockCollections, mockBlocks) + + createdTxID := unittest.IdentifierFixture() + cancelledTxID := unittest.IdentifierFixture() + createdCollection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + cancelledCollection := &flow.LightCollection{Transactions: []flow.Identifier{cancelledTxID}} + createdBlock := unittest.BlockFixture() + cancelledBlock := unittest.BlockFixture() + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusCancelled, + CreatedTransactionID: createdTxID, + CancelledTransactionID: cancelledTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // both are user transactions: not in scheduled tx lookup, fall back to collection + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return(createdBlock, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", cancelledTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", cancelledTxID).Return(cancelledCollection, nil).Once() + mockBlocks.On("ByCollectionID", cancelledCollection.ID()).Return(cancelledBlock, nil).Once() + + result, err := backend.GetScheduledTransaction(context.Background(), 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.NoError(t, err) + assert.Equal(t, createdBlock.Timestamp, result.CreatedAt) + assert.Equal(t, cancelledBlock.Timestamp, result.CompletedAt) + }) + + t.Run("collection lookup error for created tx triggers irrecoverable", func(t 
*testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + backend := makeBackend(store, scheduledTxLookup, nil, mockCollections, nil) + + createdTxID := unittest.IdentifierFixture() + lookupErr := fmt.Errorf("collection lookup error") + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusScheduled, + CreatedTransactionID: createdTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // not in scheduled lookup, falls back to collection which errors + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return((*flow.LightCollection)(nil), lookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction(signalerCtx, 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.Error(t, err) + verifyThrown() + }) + + t.Run("BlockIDByTransactionID error for executed tx triggers irrecoverable", func(t *testing.T) { + store := storagemock.NewScheduledTransactionsIndexReader(t) + scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) + mockCollections := storagemock.NewCollectionsReader(t) + mockBlocks := storagemock.NewBlocks(t) + mockHeaders := storagemock.NewHeaders(t) + backend := makeBackend(store, scheduledTxLookup, mockHeaders, mockCollections, mockBlocks) + + createdTxID := unittest.IdentifierFixture() + executedTxID := unittest.IdentifierFixture() + createdCollection := &flow.LightCollection{Transactions: []flow.Identifier{createdTxID}} + createdBlock := unittest.BlockFixture() + lookupErr := fmt.Errorf("block lookup error") + + storedTx := accessmodel.ScheduledTransaction{ + ID: 1, + Status: accessmodel.ScheduledTxStatusExecuted, + CreatedTransactionID: 
createdTxID, + ExecutedTransactionID: executedTxID, + } + + store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + // createdTxID falls back to collection + scheduledTxLookup.On("BlockIDByTransactionID", createdTxID).Return(flow.Identifier{}, storage.ErrNotFound).Once() + mockCollections.On("LightByTransactionID", createdTxID).Return(createdCollection, nil).Once() + mockBlocks.On("ByCollectionID", createdCollection.ID()).Return(createdBlock, nil).Once() + // executedTxID lookup returns an unexpected error + scheduledTxLookup.On("BlockIDByTransactionID", executedTxID).Return(flow.Identifier{}, lookupErr).Once() + + signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) + + _, err := backend.GetScheduledTransaction(signalerCtx, 1, ScheduledTransactionExpandOptions{}, defaultEncoding) + require.Error(t, err) + verifyThrown() + }) } diff --git a/engine/access/rest/experimental/models/model_scheduled_transaction.go b/engine/access/rest/experimental/models/model_scheduled_transaction.go index 8d3120340c4..216a2513ccf 100644 --- a/engine/access/rest/experimental/models/model_scheduled_transaction.go +++ b/engine/access/rest/experimental/models/model_scheduled_transaction.go @@ -35,6 +35,10 @@ type ScheduledTransaction struct { CreatedTransactionId string `json:"created_transaction_id"` ExecutedTransactionId string `json:"executed_transaction_id,omitempty"` CancelledTransactionId string `json:"cancelled_transaction_id,omitempty"` + // RFC3339Nano timestamp of the block in which the scheduled transaction was created. Absent for placeholder transactions. + CreatedAt string `json:"created_at,omitempty"` + // RFC3339Nano timestamp of the block in which the scheduled transaction was executed or cancelled. Absent when still scheduled. + CompletedAt string `json:"completed_at,omitempty"` // True if the scheduled transaction was created during bootstrapping based on the current chain state, not based on a protocol event. 
When true, block_height, transaction_id, tx_index, and event_index are absent. IsPlaceholder bool `json:"is_placeholder,omitempty"` Transaction *commonmodels.Transaction `json:"transaction,omitempty"` diff --git a/engine/access/rest/experimental/models/scheduled_transaction.go b/engine/access/rest/experimental/models/scheduled_transaction.go index 42c2ef08837..bf78be04993 100644 --- a/engine/access/rest/experimental/models/scheduled_transaction.go +++ b/engine/access/rest/experimental/models/scheduled_transaction.go @@ -3,6 +3,7 @@ package models import ( "fmt" "strconv" + "time" commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" accessmodel "github.com/onflow/flow-go/model/access" @@ -55,6 +56,13 @@ func (t *ScheduledTransaction) Build( t.IsPlaceholder = tx.IsPlaceholder + if tx.CreatedAt != 0 { + t.CreatedAt = time.UnixMilli(int64(tx.CreatedAt)).UTC().Format(time.RFC3339Nano) + } + if tx.CompletedAt != 0 { + t.CompletedAt = time.UnixMilli(int64(tx.CompletedAt)).UTC().Format(time.RFC3339Nano) + } + t.Expandable = new(ScheduledTransactionExpandable) if tx.Transaction != nil { diff --git a/engine/access/rest/experimental/routes/scheduled_transactions_test.go b/engine/access/rest/experimental/routes/scheduled_transactions_test.go index 53c81c7cd03..c05181e1387 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions_test.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/url" "testing" + "time" "github.com/stretchr/testify/assert" mocktestify "github.com/stretchr/testify/mock" @@ -91,6 +92,7 @@ func testEncodeScheduledTxCursor(t *testing.T, id uint64) string { func TestGetScheduledTransactions(t *testing.T) { handlerOwner := unittest.AddressFixture() + tx1CreatedAt := uint64(1700000000000) // Unix ms tx1CreatedID := unittest.IdentifierFixture() tx1 := accessmodel.ScheduledTransaction{ ID: 100, @@ -103,7 +105,10 @@ func TestGetScheduledTransactions(t 
*testing.T) { TransactionHandlerUUID: 7, Status: accessmodel.ScheduledTxStatusScheduled, CreatedTransactionID: tx1CreatedID, + CreatedAt: tx1CreatedAt, } + tx2CreatedAt := uint64(1699999000000) // Unix ms + tx2CompletedAt := uint64(1700001000000) // Unix ms tx2CreatedID := unittest.IdentifierFixture() tx2ExecutedID := unittest.IdentifierFixture() tx2 := accessmodel.ScheduledTransaction{ @@ -118,6 +123,8 @@ func TestGetScheduledTransactions(t *testing.T) { Status: accessmodel.ScheduledTxStatusExecuted, CreatedTransactionID: tx2CreatedID, ExecutedTransactionID: tx2ExecutedID, + CreatedAt: tx2CreatedAt, + CompletedAt: tx2CompletedAt, } t.Run("happy path with next cursor", func(t *testing.T) { @@ -145,6 +152,9 @@ func TestGetScheduledTransactions(t *testing.T) { assert.Equal(t, http.StatusOK, rr.Code) expectedNextCursor := testEncodeScheduledTxCursor(t, 99) + tx1CreatedAtStr := time.UnixMilli(int64(tx1CreatedAt)).UTC().Format(time.RFC3339Nano) + tx2CreatedAtStr := time.UnixMilli(int64(tx2CreatedAt)).UTC().Format(time.RFC3339Nano) + tx2CompletedAtStr := time.UnixMilli(int64(tx2CompletedAt)).UTC().Format(time.RFC3339Nano) expected := fmt.Sprintf(`{ "scheduled_transactions": [ { @@ -158,6 +168,7 @@ func TestGetScheduledTransactions(t *testing.T) { "transaction_handler_type_identifier": "A.0000.MyScheduler.Handler", "transaction_handler_uuid": "7", "created_transaction_id": "%s", + "created_at": "%s", "_expandable": { "handler_contract": "handler_contract" } @@ -174,6 +185,8 @@ func TestGetScheduledTransactions(t *testing.T) { "transaction_handler_uuid": "8", "created_transaction_id": "%s", "executed_transaction_id": "%s", + "created_at": "%s", + "completed_at": "%s", "_expandable": { "transaction": "/v1/transactions/%s", "result": "/v1/transaction_results/%s", @@ -182,7 +195,9 @@ func TestGetScheduledTransactions(t *testing.T) { } ], "next_cursor": "%s" - }`, handlerOwner.String(), tx1CreatedID.String(), handlerOwner.String(), tx2CreatedID.String(), 
tx2ExecutedID.String(), tx2ExecutedID.String(), tx2ExecutedID.String(), expectedNextCursor) + }`, handlerOwner.String(), tx1CreatedID.String(), tx1CreatedAtStr, + handlerOwner.String(), tx2CreatedID.String(), tx2ExecutedID.String(), tx2CreatedAtStr, tx2CompletedAtStr, + tx2ExecutedID.String(), tx2ExecutedID.String(), expectedNextCursor) assert.JSONEq(t, expected, rr.Body.String()) }) diff --git a/model/access/scheduled_transaction.go b/model/access/scheduled_transaction.go index 231958f9904..f37431f7b20 100644 --- a/model/access/scheduled_transaction.go +++ b/model/access/scheduled_transaction.go @@ -122,6 +122,10 @@ type ScheduledTransaction struct { Transaction *flow.TransactionBody `msgpack:"-"` // Transaction body (nil unless expanded) Result *TransactionResult `msgpack:"-"` // Transaction result (nil unless expanded) HandlerContract *Contract `msgpack:"-"` // Handler contract (nil unless expanded) + + // Timestamp fields are populated by the backend. Never persisted. Zero when not applicable. 
+ CreatedAt uint64 `msgpack:"-"` // Unix ms timestamp of block in which the scheduled transaction was created + CompletedAt uint64 `msgpack:"-"` // Unix ms timestamp of block in which the scheduled transaction was executed or cancelled } // ScheduledTransactionCursor identifies a position in the scheduled transaction index for diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go index 1dd48050d63..40e29434f91 100644 --- a/storage/indexes/scheduled_transactions.go +++ b/storage/indexes/scheduled_transactions.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/indexes/iterator" "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/utils/visited" ) const ( @@ -212,7 +213,12 @@ func (idx *ScheduledTransactionsIndex) Store( // - [storage.ErrAlreadyExists]: if any scheduled transaction ID already exists func storeAllScheduledTransactions(rw storage.ReaderBatchWriter, scheduledTxs []access.ScheduledTransaction) error { writer := rw.Writer() + seen := visited.New[uint64]() for _, tx := range scheduledTxs { + if seen.Visit(tx.ID) { + return fmt.Errorf("scheduled transaction %d appears more than once in batch: %w", tx.ID, storage.ErrAlreadyExists) + } + primaryKey := makeScheduledTxPrimaryKey(tx.ID) exists, err := operation.KeyExists(rw.GlobalReader(), primaryKey) diff --git a/utils/visited/visited.go b/utils/visited/visited.go new file mode 100644 index 00000000000..cdba777ea96 --- /dev/null +++ b/utils/visited/visited.go @@ -0,0 +1,44 @@ +package visited + +// Visited is a simple object that tracks whether or not a particular value has been visited. +// Use this when iterating over a collection and you need to know if a value has been encountered. +// +// CAUTION: Not concurrency safe. 
+type Visited[T comparable] struct { + m map[T]struct{} +} + +func New[T comparable]() Visited[T] { + return Visited[T]{ + m: make(map[T]struct{}), + } +} + +// Visit returns true if the value has been visited, false otherwise. +// It also adds the value to the set of visited values. +// +// CAUTION: Not concurrency safe. +func (s *Visited[T]) Visit(key T) bool { + if _, ok := s.m[key]; ok { + return true + } + s.m[key] = struct{}{} + return false +} + +// PeekVisited returns true if the value has been visited, false otherwise. +// It does not add the value to the set of visited values. +// Use this for use cases where marking the value as visited needs to happen after processing. +// +// CAUTION: Not concurrency safe. +func (s *Visited[T]) PeekVisited(key T) bool { + _, ok := s.m[key] + return ok +} + +// Count returns the number of visited values. +// +// CAUTION: Not concurrency safe. +func (s *Visited[T]) Count() int { + return len(s.m) +} diff --git a/utils/visited/visited_test.go b/utils/visited/visited_test.go new file mode 100644 index 00000000000..8f19d630187 --- /dev/null +++ b/utils/visited/visited_test.go @@ -0,0 +1,86 @@ +package visited + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestVisited_Visit verifies that Visit returns false on first encounter and true on repeat. 
+func TestVisited_Visit(t *testing.T) { + t.Run("first visit returns false", func(t *testing.T) { + v := New[int]() + assert.False(t, v.Visit(1)) + }) + + t.Run("second visit returns true", func(t *testing.T) { + v := New[int]() + v.Visit(1) + assert.True(t, v.Visit(1)) + }) + + t.Run("subsequent visits all return true", func(t *testing.T) { + v := New[int]() + v.Visit(1) + assert.True(t, v.Visit(1)) + assert.True(t, v.Visit(1)) + }) + + t.Run("distinct values tracked independently", func(t *testing.T) { + v := New[string]() + assert.False(t, v.Visit("a")) + assert.False(t, v.Visit("b")) + assert.True(t, v.Visit("a")) + assert.True(t, v.Visit("b")) + }) +} + +// TestVisited_Count verifies that Count reflects the number of unique visited values. +func TestVisited_Count(t *testing.T) { + t.Run("zero on empty", func(t *testing.T) { + v := New[int]() + assert.Equal(t, 0, v.Count()) + }) + + t.Run("increments on each new value", func(t *testing.T) { + v := New[int]() + v.Visit(1) + assert.Equal(t, 1, v.Count()) + v.Visit(2) + assert.Equal(t, 2, v.Count()) + }) + + t.Run("does not increment on repeat visit", func(t *testing.T) { + v := New[int]() + v.Visit(1) + v.Visit(1) + assert.Equal(t, 1, v.Count()) + }) + + t.Run("PeekVisited does not increment count", func(t *testing.T) { + v := New[int]() + v.PeekVisited(1) + assert.Equal(t, 0, v.Count()) + }) +} + +// TestVisited_PeekVisited verifies that PeekVisited reports membership without mutating the set. +func TestVisited_PeekVisited(t *testing.T) { + t.Run("returns false for unvisited key", func(t *testing.T) { + v := New[int]() + assert.False(t, v.PeekVisited(42)) + }) + + t.Run("returns true for visited key", func(t *testing.T) { + v := New[int]() + v.Visit(42) + assert.True(t, v.PeekVisited(42)) + }) + + t.Run("does not add key to visited set", func(t *testing.T) { + v := New[int]() + v.PeekVisited(42) + // Visit should still return false — PeekVisited must not have mutated the set. 
+ assert.False(t, v.Visit(42)) + }) +} From ba2c8474b73ef57ba021bd856ba8a6bbdf251a6b Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Mon, 9 Mar 2026 20:09:34 -0700 Subject: [PATCH 16/18] fixes from review --- .../request/get_scheduled_transactions.go | 14 ++++++++++---- .../routes/scheduled_transactions_test.go | 2 +- model/access/scheduled_transaction.go | 15 +++++++++++++-- storage/indexes/scheduled_transactions.go | 6 ++++-- 4 files changed, 28 insertions(+), 9 deletions(-) diff --git a/engine/access/rest/experimental/request/get_scheduled_transactions.go b/engine/access/rest/experimental/request/get_scheduled_transactions.go index 016aededa54..60e09465fcb 100644 --- a/engine/access/rest/experimental/request/get_scheduled_transactions.go +++ b/engine/access/rest/experimental/request/get_scheduled_transactions.go @@ -14,7 +14,6 @@ import ( // GetScheduledTransactions holds parsed request params for the list endpoints. type GetScheduledTransactions struct { - Address *flow.Address Limit uint32 Cursor *accessmodel.ScheduledTransactionCursor Filter extended.ScheduledTransactionFilter @@ -52,7 +51,7 @@ func NewGetScheduledTransactions(r *common.Request) (GetScheduledTransactions, e return req, nil } -// GetScheduledTransactions holds parsed request params for the list endpoints. +// GetAccountScheduledTransactions holds parsed request params for the list endpoints. 
type GetAccountScheduledTransactions struct { GetScheduledTransactions Address flow.Address @@ -72,6 +71,10 @@ func NewGetScheduledTransactionsByAddress(r *common.Request) (GetAccountSchedule return GetAccountScheduledTransactions{}, err } + if req.Filter.TransactionHandlerOwner != nil && *req.Filter.TransactionHandlerOwner != address { + return GetAccountScheduledTransactions{}, fmt.Errorf("handler_owner must be the same as the address") + } + return GetAccountScheduledTransactions{ GetScheduledTransactions: req, Address: address, @@ -105,8 +108,7 @@ func NewGetScheduledTransaction(r *common.Request) (GetScheduledTransaction, err // All errors indicate an invalid request. func parseScheduledTxFilter(r *common.Request, filter *extended.ScheduledTransactionFilter) error { if raw := r.GetQueryParam("status"); raw != "" { - rawStatuses := strings.Split(raw, ",") - for _, rawStatus := range rawStatuses { + for rawStatus := range strings.SplitSeq(raw, ",") { s, err := accessmodel.ParseScheduledTransactionStatus(strings.TrimSpace(rawStatus)) if err != nil { return fmt.Errorf("invalid status: %w", err) @@ -139,6 +141,10 @@ func parseScheduledTxFilter(r *common.Request, filter *extended.ScheduledTransac filter.EndTime = &v } + if filter.StartTime != nil && filter.EndTime != nil && *filter.StartTime > *filter.EndTime { + return fmt.Errorf("start_time must be before end_time") + } + if raw := r.GetQueryParam("handler_owner"); raw != "" { addr, err := parser.ParseAddress(raw, r.Chain) if err != nil { diff --git a/engine/access/rest/experimental/routes/scheduled_transactions_test.go b/engine/access/rest/experimental/routes/scheduled_transactions_test.go index c05181e1387..a812fc681e4 100644 --- a/engine/access/rest/experimental/routes/scheduled_transactions_test.go +++ b/engine/access/rest/experimental/routes/scheduled_transactions_test.go @@ -107,7 +107,7 @@ func TestGetScheduledTransactions(t *testing.T) { CreatedTransactionID: tx1CreatedID, CreatedAt: tx1CreatedAt, } - 
tx2CreatedAt := uint64(1699999000000) // Unix ms + tx2CreatedAt := uint64(1699999000000) // Unix ms tx2CompletedAt := uint64(1700001000000) // Unix ms tx2CreatedID := unittest.IdentifierFixture() tx2ExecutedID := unittest.IdentifierFixture() diff --git a/model/access/scheduled_transaction.go b/model/access/scheduled_transaction.go index f37431f7b20..79026cc500b 100644 --- a/model/access/scheduled_transaction.go +++ b/model/access/scheduled_transaction.go @@ -104,11 +104,22 @@ type ScheduledTransaction struct { Status ScheduledTransactionStatus - CreatedTransactionID flow.Identifier - ExecutedTransactionID flow.Identifier + // CreatedTransactionID is the transaction ID of the transaction in which the scheduled transaction was created + // It is always set unless the scheduled transaction is a placeholder, in which case IsPlaceholder is true. + CreatedTransactionID flow.Identifier + + // ExecutedTransactionID is the transaction ID of the transaction in which the scheduled transaction was executed + // If set, status is set to [ScheduledTxStatusExecuted]. + ExecutedTransactionID flow.Identifier + + // CancelledTransactionID is the transaction ID of the transaction in which the scheduled transaction was cancelled + // If set, status is set to [ScheduledTxStatusCancelled]. 
CancelledTransactionID flow.Identifier + // FeesReturned is the amount of fees returned to the scheduled transaction's owner when the scheduled transaction was cancelled FeesReturned uint64 + + // FeesDeducted is the amount of fees deducted from the scheduled transaction's owner when the scheduled transaction was cancelled FeesDeducted uint64 // IsPlaceholder is true if the scheduled transaction was created based on the current chain state, diff --git a/storage/indexes/scheduled_transactions.go b/storage/indexes/scheduled_transactions.go index 40e29434f91..e6ac06f3ca4 100644 --- a/storage/indexes/scheduled_transactions.go +++ b/storage/indexes/scheduled_transactions.go @@ -17,10 +17,12 @@ import ( ) const ( + // idLen is the length of the uint64 ID in bytes + idLen = 8 // scheduledTxPrimaryKeyLen is [code(1)][~id(8)] = 9 bytes - scheduledTxPrimaryKeyLen = 1 + heightLen + scheduledTxPrimaryKeyLen = 1 + idLen // scheduledTxByAddrKeyLen is [code(1)][address(8)][~id(8)] = 17 bytes - scheduledTxByAddrKeyLen = 1 + flow.AddressLength + heightLen + scheduledTxByAddrKeyLen = 1 + flow.AddressLength + idLen ) // ScheduledTransactionsIndex implements [storage.ScheduledTransactionsIndex] using Pebble. 
From d12cf3f90af4e05cfa07a53b65108bb05fdd2f92 Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:14:28 -0700 Subject: [PATCH 17/18] add method to get a single scheduled tx body --- access/backends/extended/backend.go | 2 +- .../backend_scheduled_transactions.go | 20 ++-- fvm/blueprints/scheduled_callback.go | 58 +++++++--- fvm/blueprints/scheduled_callback_test.go | 102 ++++++++++++++++++ model/access/system_collection.go | 5 + .../systemcollection/system_collection_v0.go | 27 +++++ .../system_collection_v0_test.go | 8 ++ .../systemcollection/system_collection_v1.go | 7 ++ .../system_collection_v1_test.go | 8 ++ 9 files changed, 211 insertions(+), 26 deletions(-) diff --git a/access/backends/extended/backend.go b/access/backends/extended/backend.go index d99f95ca83a..d72bdfe9af7 100644 --- a/access/backends/extended/backend.go +++ b/access/backends/extended/backend.go @@ -102,7 +102,7 @@ func New( log: log, AccountTransactionsBackend: NewAccountTransactionsBackend(log, base, store, chain), AccountTransfersBackend: NewAccountTransfersBackend(log, base, ftStore, nftStore, chain), - ScheduledTransactionsBackend: NewScheduledTransactionsBackend(log, base, scheduledTxIndex, scheduledTransactions, state, scriptExecutor), + ScheduledTransactionsBackend: NewScheduledTransactionsBackend(log, base, chainID, scheduledTxIndex, scheduledTransactions, state, scriptExecutor), }, nil } diff --git a/access/backends/extended/backend_scheduled_transactions.go b/access/backends/extended/backend_scheduled_transactions.go index 6f6ec6a47f4..95dcf70226b 100644 --- a/access/backends/extended/backend_scheduled_transactions.go +++ b/access/backends/extended/backend_scheduled_transactions.go @@ -101,6 +101,7 @@ type ScheduledTransactionsBackend struct { *backendBase log zerolog.Logger + chainID flow.ChainID store storage.ScheduledTransactionsIndexReader scheduledTransactions storage.ScheduledTransactionsReader state 
protocol.State @@ -111,6 +112,7 @@ type ScheduledTransactionsBackend struct { func NewScheduledTransactionsBackend( log zerolog.Logger, base *backendBase, + chainID flow.ChainID, store storage.ScheduledTransactionsIndexReader, scheduledTransactions storage.ScheduledTransactionsReader, state protocol.State, @@ -119,6 +121,7 @@ func NewScheduledTransactionsBackend( return &ScheduledTransactionsBackend{ backendBase: base, log: log, + chainID: chainID, store: store, scheduledTransactions: scheduledTransactions, state: state, @@ -400,20 +403,17 @@ func (b *ScheduledTransactionsBackend) expand( } if expandOptions.Transaction { - allScheduledTxs, err := b.transactionsProvider.ScheduledTransactionsByBlockID(ctx, executedHeader) + txBody, err := b.systemCollections. + ByHeight(executedHeader.Height). + ExecuteCallbacksTransaction(b.chainID.Chain(), tx.ID, tx.ExecutionEffort) if err != nil { - return fmt.Errorf("could not retrieve all scheduled transactions: %w", err) + return fmt.Errorf("failed to construct scheduled transaction body: %w", err) } - for _, scheduledTx := range allScheduledTxs { - if scheduledTx.ID() == tx.ExecutedTransactionID { - tx.Transaction = scheduledTx - break - } - } - if tx.Transaction == nil { - return fmt.Errorf("scheduled transaction %s not found in block %s", tx.ExecutedTransactionID, executedHeader.ID()) + if txBody.ID() != tx.ExecutedTransactionID { + return fmt.Errorf("scheduled transaction body ID %s does not match executed transaction ID %s", txBody.ID(), tx.ExecutedTransactionID) } + tx.Transaction = txBody } if expandOptions.Result { diff --git a/fvm/blueprints/scheduled_callback.go b/fvm/blueprints/scheduled_callback.go index 0afcc44c493..38463da568b 100644 --- a/fvm/blueprints/scheduled_callback.go +++ b/fvm/blueprints/scheduled_callback.go @@ -56,14 +56,9 @@ func ExecuteCallbacksTransactions(chain flow.Chain, processEvents flow.EventsLis return nil, fmt.Errorf("failed to get callback args from event: %w", err) } - tx, err := 
flow.NewTransactionBodyBuilder(). - AddAuthorizer(sc.ScheduledTransactionExecutor.Address). - SetScript(script). - AddArgument(id). - SetComputeLimit(effort). - Build() + tx, err := generateExecuteCallbacksTransaction(sc, script, id, effort) if err != nil { - return nil, fmt.Errorf("failed to construct execute callback transactions: %w", err) + return nil, fmt.Errorf("failed to generate execute callback transaction: %w", err) } txs = append(txs, tx) } @@ -71,15 +66,53 @@ return txs, nil } +// ExecuteCallbacksTransaction constructs a single transaction that executes the callback with the given id and effort, for the given chain. +// +// No error returns are expected during normal operation. +func ExecuteCallbacksTransaction(chain flow.Chain, id uint64, effort uint64) (*flow.TransactionBody, error) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + env := sc.AsTemplateEnv() + script := templates.GenerateSchedulerExecutorTransactionScript(env) + + return generateExecuteCallbacksTransaction(sc, script, id, effort) +} + +// generateExecuteCallbacksTransaction generates a transaction to execute a callback, for the given chain. +// +// No error returns are expected during normal operation. +func generateExecuteCallbacksTransaction( + sc *systemcontracts.SystemContracts, + script []byte, + id uint64, + effort uint64, +) (*flow.TransactionBody, error) { + encID, err := jsoncdc.Encode(cadence.UInt64(id)) + if err != nil { + return nil, fmt.Errorf("failed to encode id: %w", err) + } + + tx, err := flow.NewTransactionBodyBuilder(). + AddAuthorizer(sc.ScheduledTransactionExecutor.Address). + SetScript(script). + AddArgument(encID). + SetComputeLimit(effort). + Build() + if err != nil { + return nil, fmt.Errorf("failed to construct execute callback transactions: %w", err) + } + + return tx, nil +} + +// callbackArgsFromEvent decodes the event payload and returns the callback ID and effort. 
// // The event for processed callback event is emitted by the process callback transaction from // callback scheduler contract and has the following signature: // event PendingExecution(id: UInt64, priority: UInt8, executionEffort: UInt64, fees: UFix64, callbackOwner: Address) -func callbackArgsFromEvent(event flow.Event) ([]byte, uint64, error) { +func callbackArgsFromEvent(event flow.Event) (uint64, uint64, error) { cadenceId, cadenceEffort, err := ParsePendingExecutionEvent(event) if err != nil { - return nil, 0, err + return 0, 0, err } effort := uint64(cadenceEffort) @@ -89,12 +122,7 @@ func callbackArgsFromEvent(event flow.Event) ([]byte, uint64, error) { effort = flow.DefaultMaxTransactionGasLimit } - encID, err := jsoncdc.Encode(cadenceId) - if err != nil { - return nil, 0, fmt.Errorf("failed to encode id: %w", err) - } - - return encID, uint64(effort), nil + return uint64(cadenceId), uint64(effort), nil } // ParsePendingExecutionEvent decodes the PendingExecution event payload and returns the scheduled diff --git a/fvm/blueprints/scheduled_callback_test.go b/fvm/blueprints/scheduled_callback_test.go index 9aa7e2d1d67..d019db9f79c 100644 --- a/fvm/blueprints/scheduled_callback_test.go +++ b/fvm/blueprints/scheduled_callback_test.go @@ -18,6 +18,7 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/fixtures" ) func TestProcessCallbacksTransaction(t *testing.T) { @@ -351,3 +352,104 @@ func createEventWithModifiedField(t *testing.T, fieldName string, newValue caden Payload: payload, } } + +// TestProcessCallbacksTransactionHash tests that the hash of the ProcessCallbacksTransaction does not change. 
+func TestProcessCallbacksTransactionHash(t *testing.T) { + t.Parallel() + + expectedHashes := []chainHash{ + {chainId: "flow-mainnet", expectedHash: "a9caece21b073a85cdfa8e27c6781426025ab67d7018b9afe388a18cc293e14f"}, + {chainId: "flow-testnet", expectedHash: "af35ecd8b485e41ed9fa68580557ced336cf46789e4c93af0378ea9812cc1a4b"}, + {chainId: "flow-previewnet", expectedHash: "7a8b24b172d27d3174cfc8ad40f4412f2483b669b05ea4b25f7cbba6a1decbfb"}, + {chainId: "flow-emulator", expectedHash: "4b58ffb851c3ce5d98922c9b3e9ab0a858de84912d1697aa38bef95e23cee5d4"}, + } + + var actualHashes []chainHash + for _, expected := range expectedHashes { + chain := flow.ChainID(expected.chainId) + tx, err := blueprints.ProcessCallbacksTransaction(chain.Chain()) + require.NoError(t, err) + actualHashes = append(actualHashes, chainHash{chainId: expected.chainId, expectedHash: tx.ID().String()}) + } + + require.Equal(t, expectedHashes, actualHashes, + "Hashes of the ProcessCallbacksTransaction have changed.\n"+ + "Update the expected hashes with the following values:\n%s", formatHashes(actualHashes)) +} + +// TestExecuteCallbacksTransactionHash tests that the hash of the ExecuteCallbacksTransaction does not change +// for a given set of deterministic inputs. 
+func TestExecuteCallbacksTransactionHash(t *testing.T) { + t.Parallel() + + const id = 42 + const effort = 1000 + + expectedHashes := []chainHash{ + {chainId: "flow-mainnet", expectedHash: "cae4adc3eb92ee67a47754e3e7095e4402249aa482be19b800de601ce4cd0d32"}, + {chainId: "flow-testnet", expectedHash: "5ede5d3a9698685a98027a855cd2711968b21e1ed3b3e479eab8893ead883817"}, + {chainId: "flow-previewnet", expectedHash: "8fc04e2eb6672e75588ea1210be5d74b9453167dec82a0f5f78afd27a0e28f8b"}, + {chainId: "flow-emulator", expectedHash: "0ee4841eee4519049af0440ac7ab553adf4fa62d5835eed63029f73482a1ff54"}, + } + + var actualHashes []chainHash + for _, expected := range expectedHashes { + chain := flow.ChainID(expected.chainId) + tx, err := blueprints.ExecuteCallbacksTransaction(chain.Chain(), id, effort) + require.NoError(t, err) + actualHashes = append(actualHashes, chainHash{chainId: expected.chainId, expectedHash: tx.ID().String()}) + } + + require.Equal(t, expectedHashes, actualHashes, + "Hashes of the ExecuteCallbacksTransaction have changed.\n"+ + "Update the expected hashes with the following values:\n%s", formatHashes(actualHashes)) +} + +// TestExecuteCallbacksTransactionsHash tests that the hashes of transactions produced by +// ExecuteCallbacksTransactions do not change for a deterministic set of events. 
+func TestExecuteCallbacksTransactionsHash(t *testing.T) { + t.Parallel() + + type chainTxHashes struct { + chainId string + expectedHashes []string + } + + expected := []chainTxHashes{ + {chainId: "flow-mainnet", expectedHashes: []string{ + "9f7294a3490c0e1967022e03b485c28d9ac846ba81cd1689339f4b30996fa8e4", + "9a2e265f5df74caa80ef02121965b5e9f5626cee1585e3c8ac8cc84d7eceb901", + "e5d1f9d103d6e03751d0387bddfc586d07b8b9dc5cb9fa9d145742cdcd3e8bbd", + }}, + {chainId: "flow-testnet", expectedHashes: []string{ + "e2fc0bc9264e0a70250f69bfb0d60d8b9fb9b85292177f2ceaad75e00d03a51e", + "852f2f4c7a62e71770845d23abec273356595aa81dd1997b22d5b626e0baf821", + "3f8caada9afc30ca4ea85bb5181cfdad0d2f86db9767e1c3a7603c2f6bc30ee6", + }}, + {chainId: "flow-previewnet", expectedHashes: []string{ + "e34efc26d3cfb235fb505924eaaa94e409d1aef1b0ed465d11c0a561b346a5ac", + "07c93c8fddb9fdd4354518ae4ad4eb8131d2e8edab5bf84867cd9c4189f965fe", + "6d25de124ba1046c11d5b4e68a01e34dbd14d1ea152e69d543f0836cfc0ce501", + }}, + {chainId: "flow-emulator", expectedHashes: []string{ + "109794396aa22e43b9f18d955e9f8bd814727286dedd8eb0d27c9505255ee2ef", + "71d3019b3356d2358f670ef6b6167d19ab8534d641b7b0163798300fdea59ae9", + "286a2b217c23683d32510067ea213ca3bc693b9fb96db71f2ac07ab5cac2fb56", + }}, + } + + for _, exp := range expected { + chainID := flow.ChainID(exp.chainId) + gen := fixtures.NewGeneratorSuite(fixtures.WithSeed(42), fixtures.WithChainID(chainID)) + events := gen.PendingExecutionEvents().List(3) + + txs, err := blueprints.ExecuteCallbacksTransactions(chainID.Chain(), events) + require.NoError(t, err) + require.Len(t, txs, len(exp.expectedHashes), "chain %s: unexpected number of transactions", exp.chainId) + + for i, tx := range txs { + require.Equal(t, exp.expectedHashes[i], tx.ID().String(), + "chain %s tx[%d] hash changed", exp.chainId, i) + } + } +} diff --git a/model/access/system_collection.go b/model/access/system_collection.go index a485941468c..e182dbcf2c5 100644 --- 
a/model/access/system_collection.go +++ b/model/access/system_collection.go @@ -20,6 +20,11 @@ type SystemCollectionBuilder interface { // No error returns are expected during normal operation. ExecuteCallbacksTransactions(chain flow.Chain, processEvents flow.EventsList) ([]*flow.TransactionBody, error) + // ExecuteCallbacksTransaction constructs a transaction to execute a callback, for the given chain. + // + // No error returns are expected during normal operation. + ExecuteCallbacksTransaction(chain flow.Chain, id uint64, effort uint64) (*flow.TransactionBody, error) + // SystemChunkTransaction creates and returns the transaction corresponding to the // system chunk for the given chain. // diff --git a/model/access/systemcollection/system_collection_v0.go b/model/access/systemcollection/system_collection_v0.go index 5c3cfe942e0..5966c447d3a 100644 --- a/model/access/systemcollection/system_collection_v0.go +++ b/model/access/systemcollection/system_collection_v0.go @@ -8,6 +8,7 @@ import ( "github.com/rs/zerolog/log" + "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/flow-core-contracts/lib/go/templates" @@ -88,6 +89,32 @@ func (b *builderV0) ExecuteCallbacksTransactions(chain flow.Chain, processEvents return txs, nil } +// ExecuteCallbacksTransaction constructs a list of transaction to execute callbacks, for the given chain. +// +// No error returns are expected during normal operation. +func (b *builderV0) ExecuteCallbacksTransaction(chain flow.Chain, id uint64, effort uint64) (*flow.TransactionBody, error) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + env := sc.AsTemplateEnv() + script := templates.GenerateSchedulerExecutorTransactionScript(env) + + encID, err := jsoncdc.Encode(cadence.NewUInt64(id)) + if err != nil { + return nil, fmt.Errorf("failed to encode id: %w", err) + } + + tx, err := flow.NewTransactionBodyBuilder(). + AddAuthorizer(sc.FlowServiceAccount.Address). 
+ SetScript(script). + AddArgument(encID). + SetComputeLimit(effort). + Build() + if err != nil { + return nil, fmt.Errorf("failed to construct execute callback transactions: %w", err) + } + + return tx, nil +} + // callbackArgsFromEventV1 decodes the event payload and returns the callback ID and effort. // // No error returns are expected during normal operation. diff --git a/model/access/systemcollection/system_collection_v0_test.go b/model/access/systemcollection/system_collection_v0_test.go index 5a813fcafca..be47c2491c1 100644 --- a/model/access/systemcollection/system_collection_v0_test.go +++ b/model/access/systemcollection/system_collection_v0_test.go @@ -84,6 +84,14 @@ func (s *builderV0Suite) TestExecuteCallbacksTransactions() { } } +func (s *builderV0Suite) TestExecuteCallbacksTransaction() { + expectedID := flow.MustHexStringToIdentifier("3b16467513ee196aa369bafab83786dcd0aa0ae09059369df13cf45b13b7de26") + + tx, err := s.builder.ExecuteCallbacksTransaction(s.g.ChainID().Chain(), 42, 1000) + s.Require().NoError(err) + s.Require().True(expectedID == tx.ID(), "invalid change made in the v0 versioned system collection") +} + func (s *builderV0Suite) TestSystemChunkTransaction() { expectedID := flow.MustHexStringToIdentifier("3408f8b1aa1b33cfc3f78c3f15217272807b14cec4ef64168bcf313bc4174621") diff --git a/model/access/systemcollection/system_collection_v1.go b/model/access/systemcollection/system_collection_v1.go index 0a847ec4e52..a5b0322191e 100644 --- a/model/access/systemcollection/system_collection_v1.go +++ b/model/access/systemcollection/system_collection_v1.go @@ -34,6 +34,13 @@ func (b *builderV1) ExecuteCallbacksTransactions(chain flow.Chain, processEvents return blueprints.ExecuteCallbacksTransactions(chain, processEvents) } +// ExecuteCallbacksTransaction constructs a transaction to execute a callback, for the given chain. +// +// No error returns are expected during normal operation. 
+func (b *builderV1) ExecuteCallbacksTransaction(chain flow.Chain, id uint64, effort uint64) (*flow.TransactionBody, error) { + return blueprints.ExecuteCallbacksTransaction(chain, id, effort) +} + // SystemChunkTransaction creates and returns the transaction corresponding to the // system chunk for the given chain. // diff --git a/model/access/systemcollection/system_collection_v1_test.go b/model/access/systemcollection/system_collection_v1_test.go index 90db7f2a6b4..00f3d2239cf 100644 --- a/model/access/systemcollection/system_collection_v1_test.go +++ b/model/access/systemcollection/system_collection_v1_test.go @@ -84,6 +84,14 @@ func (s *builderV1Suite) TestExecuteCallbacksTransactions() { } } +func (s *builderV1Suite) TestExecuteCallbacksTransaction() { + expectedID := flow.MustHexStringToIdentifier("cae4adc3eb92ee67a47754e3e7095e4402249aa482be19b800de601ce4cd0d32") + + tx, err := s.builder.ExecuteCallbacksTransaction(s.g.ChainID().Chain(), 42, 1000) + s.Require().NoError(err) + s.Require().True(expectedID == tx.ID(), "invalid change made in the v1 versioned system collection") +} + func (s *builderV1Suite) TestSystemChunkTransaction() { expectedID := flow.MustHexStringToIdentifier("3408f8b1aa1b33cfc3f78c3f15217272807b14cec4ef64168bcf313bc4174621") From 689c00230d71b0eff8f8ecbea99a47f065ec074e Mon Sep 17 00:00:00 2001 From: Peter Argue <89119817+peterargue@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:35:47 -0700 Subject: [PATCH 18/18] fix unittests --- .../backend_scheduled_transactions_test.go | 184 ++++++++---------- 1 file changed, 81 insertions(+), 103 deletions(-) diff --git a/access/backends/extended/backend_scheduled_transactions_test.go b/access/backends/extended/backend_scheduled_transactions_test.go index fefd54af28d..a6718aee832 100644 --- a/access/backends/extended/backend_scheduled_transactions_test.go +++ b/access/backends/extended/backend_scheduled_transactions_test.go @@ -14,7 +14,9 @@ import ( 
"github.com/onflow/flow/protobuf/go/flow/entities" providermock "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider/mock" + "github.com/onflow/flow-go/fvm/blueprints" accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/access/systemcollection" "github.com/onflow/flow-go/model/flow" executionmock "github.com/onflow/flow-go/module/execution/mock" "github.com/onflow/flow-go/module/irrecoverable" @@ -283,7 +285,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) expectedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusScheduled} @@ -304,7 +306,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("ByID", uint64(99)).Return(accessmodel.ScheduledTransaction{}, storage.ErrNotFound).Once() @@ -322,7 +324,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("ByID", uint64(1)).Return(accessmodel.ScheduledTransaction{}, storage.ErrNotBootstrapped).Once() @@ -340,7 +342,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, 
nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) storageErr := fmt.Errorf("unexpected disk failure") @@ -361,7 +363,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) tx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusScheduled} @@ -396,7 +398,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { collections: mockCollections, blocks: mockBlocks, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) tx := accessmodel.ScheduledTransaction{ @@ -436,7 +438,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { headers: mockHeaders, transactionsProvider: mockProvider, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := unittest.IdentifierFixture() @@ -474,39 +476,49 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) mockHeaders := storagemock.NewHeaders(t) - mockProvider := providermock.NewTransactionProvider(t) + + sysCollections, err := systemcollection.NewVersioned( + flow.Mainnet.Chain(), systemcollection.Default(flow.Mainnet), + ) + require.NoError(t, err) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{ - config: defaultConfig, - headers: mockHeaders, - transactionsProvider: mockProvider, + config: defaultConfig, + headers: mockHeaders, + systemCollections: sysCollections, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) - txBody := unittest.TransactionBodyFixture() - txID := txBody.ID() + // construct 
the expected tx body using the same path the production code will use + const scheduledTxID = uint64(42) + const executionEffort = uint64(1000) + expectedTxBody, err := blueprints.ExecuteCallbacksTransaction(flow.Mainnet.Chain(), scheduledTxID, executionEffort) + require.NoError(t, err) + txID := expectedTxBody.ID() + blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: status, ExecutedTransactionID: txID} + storedTx := accessmodel.ScheduledTransaction{ + ID: scheduledTxID, Status: status, + ExecutedTransactionID: txID, ExecutionEffort: executionEffort, + } - store.On("ByID", uint64(1)).Return(storedTx, nil).Once() + store.On("ByID", scheduledTxID).Return(storedTx, nil).Once() scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() - mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
- Return([]*flow.TransactionBody{&txBody}, nil).Once() result, err := backend.GetScheduledTransaction( - context.Background(), 1, + context.Background(), scheduledTxID, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, ) require.NoError(t, err) require.NotNil(t, result.Transaction) - assert.Equal(t, &txBody, result.Transaction) + assert.Equal(t, txID, result.Transaction.ID()) assert.Nil(t, result.Result) }) } @@ -519,7 +531,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, mockState, mockScriptExecutor, + flow.Mainnet, store, nil, mockState, mockScriptExecutor, ) handlerOwner := unittest.RandomAddressFixture() @@ -567,7 +579,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { collections: mockCollections, blocks: mockBlocks, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) createdTxID := unittest.IdentifierFixture() @@ -602,7 +614,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { collections: mockCollections, blocks: mockBlocks, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) cancelledTxID := unittest.IdentifierFixture() @@ -629,7 +641,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := unittest.IdentifierFixture() @@ -654,7 +666,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := 
unittest.IdentifierFixture() @@ -680,7 +692,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig, headers: mockHeaders}, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := unittest.IdentifierFixture() @@ -701,77 +713,43 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { verifyThrown() }) - t.Run("ScheduledTransactionsByBlockID error during expand triggers irrecoverable", func(t *testing.T) { + t.Run("transaction ID mismatch during expand triggers irrecoverable", func(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) mockHeaders := storagemock.NewHeaders(t) - mockProvider := providermock.NewTransactionProvider(t) - - backend := NewScheduledTransactionsBackend( - unittest.Logger(), - &backendBase{ - config: defaultConfig, - headers: mockHeaders, - transactionsProvider: mockProvider, - }, - store, scheduledTxLookup, nil, nil, - ) - - txID := unittest.IdentifierFixture() - blockHeader := unittest.BlockHeaderFixture() - blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} - providerErr := fmt.Errorf("provider error") - - store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() - mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() - mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
- Return(nil, providerErr).Once() - - signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) - _, err := backend.GetScheduledTransaction( - signalerCtx, 1, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, + sysCollections, err := systemcollection.NewVersioned( + flow.Mainnet.Chain(), systemcollection.Default(flow.Mainnet), ) - require.Error(t, err) - verifyThrown() - }) - - t.Run("transaction not found in block during expand triggers irrecoverable", func(t *testing.T) { - store := storagemock.NewScheduledTransactionsIndexReader(t) - scheduledTxLookup := storagemock.NewScheduledTransactionsReader(t) - mockHeaders := storagemock.NewHeaders(t) - mockProvider := providermock.NewTransactionProvider(t) + require.NoError(t, err) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{ - config: defaultConfig, - headers: mockHeaders, - transactionsProvider: mockProvider, + config: defaultConfig, + headers: mockHeaders, + systemCollections: sysCollections, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) - // txID that does NOT match the tx body returned by the provider. 
- txID := unittest.IdentifierFixture() + // ExecutedTransactionID does NOT match the tx body constructed by systemCollections + mismatchedTxID := unittest.IdentifierFixture() blockHeader := unittest.BlockHeaderFixture() blockID := blockHeader.ID() - storedTx := accessmodel.ScheduledTransaction{ID: 1, Status: accessmodel.ScheduledTxStatusExecuted, ExecutedTransactionID: txID} - // otherTxBody.ID() != txID - otherTxBody := unittest.TransactionBodyFixture() + storedTx := accessmodel.ScheduledTransaction{ + ID: 42, Status: accessmodel.ScheduledTxStatusExecuted, + ExecutedTransactionID: mismatchedTxID, ExecutionEffort: 1000, + } - store.On("ByID", uint64(1)).Return(storedTx, nil).Once() - scheduledTxLookup.On("BlockIDByTransactionID", txID).Return(blockID, nil).Once() + store.On("ByID", uint64(42)).Return(storedTx, nil).Once() + scheduledTxLookup.On("BlockIDByTransactionID", mismatchedTxID).Return(blockID, nil).Once() mockHeaders.On("ByBlockID", blockID).Return(blockHeader, nil).Once() - mockProvider.On("ScheduledTransactionsByBlockID", mocktestify.Anything, blockHeader). 
- Return([]*flow.TransactionBody{&otherTxBody}, nil).Once() signalerCtx, verifyThrown := signalerCtxExpectingThrow(t) - _, err := backend.GetScheduledTransaction( - signalerCtx, 1, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, + _, err = backend.GetScheduledTransaction( + signalerCtx, 42, ScheduledTransactionExpandOptions{Transaction: true}, defaultEncoding, ) require.Error(t, err) verifyThrown() @@ -790,7 +768,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { headers: mockHeaders, transactionsProvider: mockProvider, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := unittest.IdentifierFixture() @@ -821,7 +799,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, mockState, nil, + flow.Mainnet, store, nil, mockState, nil, ) storedTx := accessmodel.ScheduledTransaction{ @@ -855,7 +833,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, mockState, mockScriptExecutor, + flow.Mainnet, store, nil, mockState, mockScriptExecutor, ) handlerOwner := unittest.RandomAddressFixture() @@ -894,7 +872,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransaction(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, mockState, mockScriptExecutor, + flow.Mainnet, store, nil, mockState, mockScriptExecutor, ) handlerOwner := unittest.RandomAddressFixture() @@ -938,7 +916,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - 
store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) txs := []accessmodel.ScheduledTransaction{ @@ -963,7 +941,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) // limit=2, provide 3 items: CollectResults collects 2, then peeks at item 3 to build cursor @@ -991,7 +969,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). @@ -1009,7 +987,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
@@ -1027,7 +1005,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) _, err := backend.GetScheduledTransactions( @@ -1045,7 +1023,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) cursor := &accessmodel.ScheduledTransactionCursor{ID: 100} @@ -1064,7 +1042,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). @@ -1084,7 +1062,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) store.On("All", (*accessmodel.ScheduledTransactionCursor)(nil)). 
@@ -1105,7 +1083,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) storageErr := fmt.Errorf("unexpected disk failure") @@ -1130,7 +1108,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) txID := unittest.IdentifierFixture() @@ -1167,7 +1145,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactions(t *testing.T) { collections: mockCollections, blocks: mockBlocks, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) createdTxID := unittest.IdentifierFixture() @@ -1205,7 +1183,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1231,7 +1209,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1250,7 +1228,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - 
store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1270,7 +1248,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1290,7 +1268,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1311,7 +1289,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1333,7 +1311,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi store := storagemock.NewScheduledTransactionsIndexReader(t) backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, nil, nil, nil, + flow.Mainnet, store, nil, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1359,7 +1337,7 @@ func TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi backend := NewScheduledTransactionsBackend( unittest.Logger(), &backendBase{config: defaultConfig}, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1397,7 +1375,7 @@ func 
TestScheduledTransactionsBackend_GetScheduledTransactionsByAddress(t *testi collections: mockCollections, blocks: mockBlocks, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) addr := unittest.RandomAddressFixture() @@ -1448,7 +1426,7 @@ func TestScheduledTransactionsBackend_PopulateBlockTimestamps(t *testing.T) { blocks: mockBlocks, collections: mockCollections, }, - store, scheduledTxLookup, nil, nil, + flow.Mainnet, store, scheduledTxLookup, nil, nil, ) }