diff --git a/cmd/run.go b/cmd/run.go
index 4f53f36e0a..fccbbe91ba 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -723,7 +723,8 @@ func forkIDIntervals(ctx context.Context, st *state.State, etherman *etherman.Cl
 		if err != nil && !errors.Is(err, state.ErrStateNotSynchronized) {
 			return []state.ForkIDInterval{}, fmt.Errorf("error checking lastL1BlockSynced. Error: %v", err)
 		}
-		if lastBlock != nil {
+		// If lastBlock is below genesisBlock it means state.ErrStateNotSynchronized (the sync process has not started yet, it is doing pre-genesis sync)
+		if lastBlock != nil && lastBlock.BlockNumber >= genesisBlockNumber {
 			log.Info("Getting forkIDs intervals. Please wait...")
 			// Read Fork ID FROM POE SC
 			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, lastBlock.BlockNumber)
@@ -763,32 +764,14 @@ func forkIDIntervals(ctx context.Context, st *state.State, etherman *etherman.Cl
 			}
 			forkIDIntervals = forkIntervals
 		} else {
-			log.Debug("Getting all forkIDs")
-
-			// Get last L1 block number
-			bn, err := etherman.GetLatestBlockNumber(ctx)
-			if err != nil {
-				return []state.ForkIDInterval{}, fmt.Errorf("error getting latest block number. Error: %v", err)
-			}
-
-			// Get all forkIDs since genesis
-			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, bn)
+			log.Debug("Getting initial forkID")
+			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, genesisBlockNumber)
 			if err != nil {
 				return []state.ForkIDInterval{}, fmt.Errorf("error getting forks. Please check the configuration. Error: %v", err)
 			} else if len(forkIntervals) == 0 {
 				return []state.ForkIDInterval{}, fmt.Errorf("error: no forkID received. It should receive at least one, please check the configuration...")
 			}
 			forkIDIntervals = forkIntervals
-
-			log.Debugf("Retrieved %d forkIDs", len(forkIDIntervals))
-
-			log.Debug("Adding forkIDs to db and memory")
-			for _, forkID := range forkIDIntervals {
-				err = st.AddForkIDInterval(ctx, forkID, nil)
-				if err != nil {
-					log.Fatal("error adding forkID to db. Error: ", err)
-				}
-			}
 		}
 	}
 	return forkIDIntervals, nil
diff --git a/state/block.go b/state/block.go
index c5c9fbb1a2..50d7fb730f 100644
--- a/state/block.go
+++ b/state/block.go
@@ -1,6 +1,7 @@
 package state
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -14,6 +15,10 @@ type Block struct {
 	ReceivedAt  time.Time
 }
 
+func (b *Block) String() string {
+	return fmt.Sprintf("BlockNumber: %d, BlockHash: %s, ParentHash: %s, ReceivedAt: %s", b.BlockNumber, b.BlockHash, b.ParentHash, b.ReceivedAt)
+}
+
 // NewBlock creates a block with the given data.
 func NewBlock(blockNumber uint64) *Block {
 	return &Block{BlockNumber: blockNumber}
diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go
index 8caa59bf37..68b7ab3439 100644
--- a/synchronizer/synchronizer.go
+++ b/synchronizer/synchronizer.go
@@ -366,6 +366,7 @@ func (s *ClientSynchronizer) processGenesis() (*state.Block, error) {
 		log.Errorf("error creating db transaction to get latest block. Error: %v", err)
 		return nil, err
 	}
+	// This adds the genesis block and sets values on the tree
 	genesisRoot, err := s.state.SetGenesis(s.ctx, *lastEthBlockSynced, s.genesis, stateMetrics.SynchronizerCallerLabel, dbTx)
 	if err != nil {
 		log.Error("error setting genesis: ", err)
@@ -562,40 +563,37 @@ func (s *ClientSynchronizer) RequestAndProcessRollupGenesisBlock(dbTx pgx.Tx, la
 		log.Error("error getting rollupInfoByBlockRange after set the genesis: ", err)
 		return err
 	}
-	// Check that the response is the expected. It should be 1 block with 2 orders
+	// Check that the response is as expected. It should be 1 block with a ForkID event
+	log.Debugf("Sanity check for genesis block (%d) events: %+v", lastEthBlockSynced.BlockNumber, order)
 	err = sanityCheckForGenesisBlockRollupInfo(blocks, order)
 	if err != nil {
 		return err
 	}
-	forkId := s.state.GetForkIDByBlockNumber(blocks[0].BlockNumber)
-	err = s.l1EventProcessors.Process(s.ctx, actions.ForkIdType(forkId), etherman.Order{Name: etherman.ForkIDsOrder, Pos: 0}, &blocks[0], dbTx)
+	log.Infof("Processing genesis block %d orders: %+v", lastEthBlockSynced.BlockNumber, order)
+	err = s.internalProcessBlock(blocks[0], order[blocks[0].BlockHash], false, dbTx)
 	if err != nil {
-		log.Error("error storing genesis forkID: ", err)
-		return err
-	}
-	if len(blocks[0].SequencedBatches) != 0 {
-		batchSequence := l1event_orders.GetSequenceFromL1EventOrder(etherman.InitialSequenceBatchesOrder, &blocks[0], 0)
-		forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber)
-		err = s.l1EventProcessors.Process(s.ctx, actions.ForkIdType(forkId), etherman.Order{Name: etherman.InitialSequenceBatchesOrder, Pos: 0}, &blocks[0], dbTx)
-		if err != nil {
-			log.Error("error storing initial tx (batch 1): ", err)
-			return err
-		}
+		log.Errorf("error processing events on genesis block %d, err: %v", lastEthBlockSynced.BlockNumber, err)
+		return err
 	}
+
 	return nil
 }
 
 func sanityCheckForGenesisBlockRollupInfo(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error {
 	if len(blocks) != 1 || len(order) < 1 || len(order[blocks[0].BlockHash]) < 1 {
-		log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with 2 orders")
-		return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with 2 orders")
+		log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with at least 1 order")
+		return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with at least 1 order")
 	}
-	if order[blocks[0].BlockHash][0].Name != etherman.ForkIDsOrder {
-		log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected ForkIDsOrder, got %s", order[blocks[0].BlockHash][0].Name)
-		return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected ForkIDsOrder")
+	// The genesis block must include at least one ForkID event
+	for _, value := range order[blocks[0].BlockHash] {
+		if value.Name == etherman.ForkIDsOrder {
+			return nil
+		}
 	}
-
-	return nil
+	err := fmt.Errorf("events on genesis block (%d) must include a ForkIDsOrder event, but this block got %+v",
+		blocks[0].BlockNumber, order[blocks[0].BlockHash])
+	log.Error(err.Error())
+	return err
 }
 
 // This function syncs the node from a specific block to the latest
@@ -723,7 +720,6 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc
 
 // ProcessBlockRange process the L1 events and stores the information in the db
 func (s *ClientSynchronizer) ProcessBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error {
-	// New info has to be included into the db using the state
 	for i := range blocks {
 		// Begin db transaction
 		dbTx, err := s.state.BeginStateTransaction(s.ctx)
 		if err != nil {
 			log.Errorf("error creating db transaction to store block. BlockNumber: %d, error: %v", blocks[i].BlockNumber, err)
 			return err
 		}
-		b := state.Block{
-			BlockNumber: blocks[i].BlockNumber,
-			BlockHash:   blocks[i].BlockHash,
-			ParentHash:  blocks[i].ParentHash,
-			ReceivedAt:  blocks[i].ReceivedAt,
+		err = s.internalProcessBlock(blocks[i], order[blocks[i].BlockHash], true, dbTx)
+		if err != nil {
+			log.Errorf("rolling back BlockNumber: %d because of an error in internalProcessBlock: %v", blocks[i].BlockNumber, err)
+			// If anything goes wrong we ensure that the state is rolled back
+			rollbackErr := dbTx.Rollback(s.ctx)
+			if rollbackErr != nil && !errors.Is(rollbackErr, pgx.ErrTxClosed) {
+				log.Warnf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err)
+				return fmt.Errorf("error rolling back BlockNumber: %d: err: %w, original error: %w", blocks[i].BlockNumber, rollbackErr, err)
+			}
+			return err
 		}
-		// Add block information
-		err = s.state.AddBlock(s.ctx, &b, dbTx)
+
+		err = dbTx.Commit(s.ctx)
 		if err != nil {
-			log.Errorf("error storing block. BlockNumber: %d, error: %v", blocks[i].BlockNumber, err)
+			log.Errorf("error committing state to store block. BlockNumber: %d, err: %v", blocks[i].BlockNumber, err)
 			rollbackErr := dbTx.Rollback(s.ctx)
 			if rollbackErr != nil {
 				log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err)
@@ -748,53 +749,55 @@ func (s *ClientSynchronizer) ProcessBlockRange(blocks []etherman.Block, order ma
 			}
 			return err
 		}
+	}
+	return nil
+}
 
-		for _, element := range order[blocks[i].BlockHash] {
-			batchSequence := l1event_orders.GetSequenceFromL1EventOrder(element.Name, &blocks[i], element.Pos)
-			var forkId uint64
-			if batchSequence != nil {
-				forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber)
-				log.Debug("EventOrder: ", element.Name, ". Batch Sequence: ", batchSequence, "forkId: ", forkId)
-			} else {
-				forkId = s.state.GetForkIDByBlockNumber(blocks[i].BlockNumber)
-				log.Debug("EventOrder: ", element.Name, ". BlockNumber: ", blocks[i].BlockNumber, "forkId: ", forkId)
-			}
-			forkIdTyped := actions.ForkIdType(forkId)
-			// Process event received from l1
-			err := s.l1EventProcessors.Process(s.ctx, forkIdTyped, element, &blocks[i], dbTx)
-			if err != nil {
-				log.Error("error: ", err)
-				// If any goes wrong we ensure that the state is rollbacked
-				rollbackErr := dbTx.Rollback(s.ctx)
-				if rollbackErr != nil && !errors.Is(rollbackErr, pgx.ErrTxClosed) {
-					log.Warnf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err)
-					return rollbackErr
-				}
-				return err
-			}
+// internalProcessBlock processes the L1 events of a single block and stores the information in the db
+func (s *ClientSynchronizer) internalProcessBlock(block etherman.Block, order []etherman.Order, addBlock bool, dbTx pgx.Tx) error {
+	var err error
+	// New info has to be included into the db using the state
+	if addBlock {
+		b := state.Block{
+			BlockNumber: block.BlockNumber,
+			BlockHash:   block.BlockHash,
+			ParentHash:  block.ParentHash,
+			ReceivedAt:  block.ReceivedAt,
 		}
-		log.Debug("Checking FlushID to commit L1 data to db")
-		err = s.checkFlushID(dbTx)
+		// Add block information
+		log.Debugf("Storing block. Block: %s", b.String())
+		err = s.state.AddBlock(s.ctx, &b, dbTx)
 		if err != nil {
-			log.Errorf("error checking flushID. Error: %v", err)
-			rollbackErr := dbTx.Rollback(s.ctx)
-			if rollbackErr != nil {
-				log.Errorf("error rolling back state. RollbackErr: %s, Error : %v", rollbackErr.Error(), err)
-				return rollbackErr
-			}
+			log.Errorf("error storing block. BlockNumber: %d, error: %v", block.BlockNumber, err)
 			return err
 		}
-		err = dbTx.Commit(s.ctx)
+	}
+
+	for _, element := range order {
+		batchSequence := l1event_orders.GetSequenceFromL1EventOrder(element.Name, &block, element.Pos)
+		var forkId uint64
+		if batchSequence != nil {
+			forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber)
+			log.Debug("EventOrder: ", element.Name, ". Batch Sequence: ", batchSequence, "forkId: ", forkId)
+		} else {
+			forkId = s.state.GetForkIDByBlockNumber(block.BlockNumber)
+			log.Debug("EventOrder: ", element.Name, ". BlockNumber: ", block.BlockNumber, "forkId: ", forkId)
+		}
+		forkIdTyped := actions.ForkIdType(forkId)
+		// Process event received from l1
+		err := s.l1EventProcessors.Process(s.ctx, forkIdTyped, element, &block, dbTx)
 		if err != nil {
-			log.Errorf("error committing state to store block. BlockNumber: %d, err: %v", blocks[i].BlockNumber, err)
-			rollbackErr := dbTx.Rollback(s.ctx)
-			if rollbackErr != nil {
-				log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err)
-				return rollbackErr
-			}
+			log.Error("l1EventProcessors.Process error: ", err)
 			return err
 		}
 	}
+	log.Debug("Checking FlushID to commit L1 data to db")
+	err = s.checkFlushID(dbTx)
+	if err != nil {
+		log.Errorf("error checking flushID. Error: %v", err)
+		return err
+	}
+	return nil
 }
diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go
index 1c1a9fdc82..3171b647bc 100644
--- a/synchronizer/synchronizer_test.go
+++ b/synchronizer/synchronizer_test.go
@@ -42,6 +42,30 @@ type mocks struct {
 	//EventLog *eventLogMock
 }
 
+func TestExploratory(t *testing.T) {
+	t.Skip("This test is for exploratory purposes, it is not a real test")
+	cfg := etherman.Config{
+		URL:              "https://eth-sepolia.g.alchemy.com/v2/",
+		ForkIDChunkSize:  100,
+		MultiGasProvider: false,
+	}
+	l1Config := etherman.L1Config{
+		L1ChainID:                 1,
+		ZkEVMAddr:                 common.HexToAddress("0xf3eca1d175cd7811519e251a9e0c7f13c9217734"),
+		RollupManagerAddr:         common.HexToAddress("0x32d33d5137a7cffb54c5bf8371172bcec5f310ff"),
+		PolAddr:                   common.HexToAddress("0x6a7c3f4b0651d6da389ad1d11d962ea458cdca70"),
+		GlobalExitRootManagerAddr: common.HexToAddress("0xad1490c248c5d3cbae399fd529b79b42984277df"),
+	}
+
+	eth, err := etherman.NewClient(cfg, l1Config, nil)
+	require.NoError(t, err)
+	blockNumber := uint64(6510565)
+	blocks, order, err := eth.GetRollupInfoByBlockRange(context.TODO(), blockNumber, &blockNumber)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(blocks))
+	require.Equal(t, 1, len(order))
+}
+
 // Feature #2220 and #2239: Optimize Trusted state synchronization
 //
 // this Check partially point 2: Use previous batch stored in memory to avoid getting from database