diff --git a/dot/state/epoch.go b/dot/state/epoch.go
index 4e472d17cc..390818eef4 100644
--- a/dot/state/epoch.go
+++ b/dot/state/epoch.go
@@ -8,6 +8,8 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -28,11 +30,13 @@ var (
 )
 
 var (
-	epochPrefix      = "epoch"
-	currentEpochKey  = []byte("current")
-	epochDataPrefix  = []byte("epochinfo")
-	configDataPrefix = []byte("configinfo")
-	skipToKey        = []byte("skipto")
+	epochPrefix          = "epoch"
+	currentEpochKey      = []byte("current")
+	epochDataPrefix      = []byte("epochinfo")
+	configDataPrefix     = []byte("configinfo")
+	skipToKey            = []byte("skipto")
+	nextEpochDataPrefix  = []byte("nextepochdata")
+	nextConfigDataPrefix = []byte("nextconfigdata")
 )
 
 func epochDataKey(epoch uint64) []byte {
@@ -47,6 +51,16 @@ func configDataKey(epoch uint64) []byte {
 	return append(configDataPrefix, buf...)
 }
 
+func nextEpochDataKey(epoch uint64, hash common.Hash) []byte {
+	partialKey := fmt.Sprintf("%d:%s", epoch, hash.String())
+	return append(nextEpochDataPrefix, []byte(partialKey)...)
+}
+
+func nextConfigDataKey(epoch uint64, hash common.Hash) []byte {
+	partialKey := fmt.Sprintf("%d:%s", epoch, hash.String())
+	return append(nextConfigDataPrefix, []byte(partialKey)...)
+}
+
 // GenesisEpochDescriptor is the informations provided by calling
 // the genesis WASM runtime exported function `BabeAPIConfiguration`
 type GenesisEpochDescriptor struct {
@@ -56,7 +70,7 @@ type GenesisEpochDescriptor struct {
 
 // EpochState tracks information related to each epoch
 type EpochState struct {
-	db          GetterPutterNewBatcher
+	db          database.Table
 	baseState   *BaseState
 	blockState  *BlockState
 	epochLength uint64 // measured in slots
@@ -127,16 +141,26 @@ func NewEpochState(db database.Database, blockState *BlockState,
 	if err != nil {
 		return nil, err
 	}
 
+	epochTable := database.NewTable(db, epochPrefix)
+	nextEpochData, err := restoreMapFromDisk[types.NextEpochData](epochTable, nextEpochDataPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	nextConfigData, err := restoreMapFromDisk[types.NextConfigDataV1](epochTable, nextConfigDataPrefix)
+	if err != nil {
+		return nil, err
+	}
 	return &EpochState{
 		baseState:      baseState,
 		blockState:     blockState,
-		db:             database.NewTable(db, epochPrefix),
+		db:             epochTable,
 		epochLength:    genesisConfig.EpochLength,
 		slotDuration:   genesisConfig.SlotDuration,
 		skipToEpoch:    skipToEpoch,
-		nextEpochData:  make(nextEpochMap[types.NextEpochData]),
-		nextConfigData: make(nextEpochMap[types.NextConfigDataV1]),
+		nextEpochData:  nextEpochData,
+		nextConfigData: nextConfigData,
 		genesisEpochDescriptor: &GenesisEpochDescriptor{
 			EpochData: &types.EpochDataRaw{
 				Authorities: genesisConfig.GenesisAuthorities,
@@ -151,6 +175,72 @@ func NewEpochState(db database.Database, blockState *BlockState,
 	}, nil
 }
 
+// restoreMapFromDisk retrieves the next epoch and config data maps from the database
+func restoreMapFromDisk[T types.NextConfigDataV1 | types.NextEpochData](db database.Table, prefix []byte) (
+	nextEpochMap[T], error) {
+
+	resMap := make(nextEpochMap[T])
+	iter, err := db.NewPrefixIterator(prefix)
+	if err != nil {
+		return resMap, err
+	}
+
+	defer iter.Release()
+
+	for iter.First(); iter.Valid(); iter.Next() {
+		mapValue, epoch, fork, err := getNextEpochOrConfigData[T](iter, prefix)
+
+		if err != nil {
+			return resMap, err
+		}
+
+		if _, ok := resMap[epoch]; !ok {
+			resMap[epoch] = make(map[common.Hash]T)
+		}
+
+		resMap[epoch][fork] = *mapValue
+	}
+
+	if err = iter.Close(); err != nil {
+		return resMap, err
+	}
+	return resMap, nil
+}
+
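+// Note: nextEpochDataKey and nextConfigDataKey above lay keys out as
+// <prefix><epoch>:<0x-prefixed fork hash>; getNextEpochOrConfigData below
+// relies on this layout when splitting a stored key back into its epoch
+// number and fork block hash.
+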
+// getNextEpochOrConfigData retrieves the next epoch or config data from the iterator
+func getNextEpochOrConfigData[T types.NextConfigDataV1 | types.NextEpochData](iter database.Iterator, prefix []byte) (
+	*T, uint64, common.Hash, error) {
+	nextData := new(T)
+	key := string(iter.Key())
+	value := iter.Value()
+
+	keyWithoutPrefix := strings.Split(key, string(prefix))[1]
+
+	// Split the key into epoch and fork
+	parts := strings.Split(keyWithoutPrefix, ":")
+	if len(parts) != 2 {
+		return nil, 0, common.Hash{}, fmt.Errorf("invalid key format: %s", key)
+	}
+	epoch, err := strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return nil, 0, common.Hash{}, err
+	}
+
+	var fork common.Hash
+	part1, err := common.HexToBytes(parts[1])
+	if err != nil {
+		return nil, 0, common.Hash{}, fmt.Errorf("while converting bytes to hash: %w", err)
+	}
+
+	copy(fork[:], part1)
+
+	if err = scale.Unmarshal(value, nextData); err != nil {
+		return nil, 0, common.Hash{}, err
+	}
+
+	return nextData, epoch, fork, nil
+}
+
 // GetEpochLength returns the length of an epoch in slots
 func (s *EpochState) GetEpochLength() uint64 {
 	return s.epochLength
@@ -187,7 +277,7 @@ func (s *EpochState) GetEpochForBlock(header *types.Header) (uint64, error) {
 	// actually the epoch number for block number #1 is epoch 0,
 	// epochs start from 0 and are incremented (almost, given that epochs might be skipped)
 	// sequentially 0...1...2, so the block number #1 belongs to epoch 0
-	if header.Number == 1 {
+	if header.Number == 0 || header.Number == 1 {
 		return 0, nil
 	}
 
@@ -596,6 +686,11 @@ func (s *EpochState) HandleBABEDigest(header *types.Header, digest types.BabeCon
 		nextEpoch := currEpoch + 1
 
 		s.storeBABENextEpochData(nextEpoch, headerHash, val)
+
+		if err = s.setBABENextEpochDataInDB(nextEpoch, headerHash, val); err != nil {
+			return fmt.Errorf("setting next epoch data in db: %w", err)
+		}
+
 		logger.Debugf("stored BABENextEpochData data: %v for hash: %s to epoch: %d",
 			digest, headerHash, nextEpoch)
 		return nil
@@ -616,6 +711,11 @@ func (s *EpochState) HandleBABEDigest(header *types.Header, digest types.BabeCon
 		}
 
 		nextEpoch := currEpoch + 1
 		s.storeBABENextConfigData(nextEpoch, headerHash, nextConfigData)
+
+		if err := s.setBABENextConfigData(nextEpoch, headerHash, nextConfigData); err != nil {
+			return fmt.Errorf("setting next config data in db: %w", err)
+		}
+
 		logger.Debugf("stored BABENextConfigData data: %v for hash: %s to epoch: %d",
 			digest, headerHash, nextEpoch)
 		return nil
 	default:
@@ -823,6 +923,19 @@ func (s *EpochState) storeBABENextEpochData(epoch uint64, hash common.Hash, next
 	s.nextEpochData[epoch][hash] = nextEpochData
 }
 
+// setBABENextEpochDataInDB stores the types.NextEpochData under epoch and hash keys
+func (s *EpochState) setBABENextEpochDataInDB(epoch uint64, forkHash common.Hash,
+	nextEpochData types.NextEpochData) error {
+	encodedEpochData, err := scale.Marshal(nextEpochData)
+	if err != nil {
+		return err
+	}
+
+	key := nextEpochDataKey(epoch, forkHash)
+
+	return s.db.Put(key, encodedEpochData)
+}
+
 // StoreBABENextConfigData stores the types.NextConfigData under epoch and hash keys
 func (s *EpochState) storeBABENextConfigData(epoch uint64, hash common.Hash, nextConfigData types.NextConfigDataV1) {
 	s.nextConfigDataLock.Lock()
@@ -837,6 +950,18 @@ func (s *EpochState) storeBABENextConfigData(epoch uint64, hash common.Hash, nex
 	}
 
 	s.nextConfigData[epoch][hash] = nextConfigData
 }
 
+// setBABENextConfigData stores the types.NextConfigData under epoch and hash keys
+func (s *EpochState) setBABENextConfigData(epoch uint64,
+	forkHash common.Hash, nextConfigData types.NextConfigDataV1) error {
+	encodedConfigData, err := scale.Marshal(nextConfigData)
+	if err != nil {
+		return err
+	}
+
+	key := nextConfigDataKey(epoch, forkHash)
+	return s.db.Put(key, encodedConfigData)
+}
+
 // FinalizeBABENextEpochData stores the right types.NextEpochData by
 // getting the set of hashes from the received epoch and for each hash
 // check if the header is in the database then it's been finalized and
@@ -888,12 +1013,66 @@ func (s *EpochState) FinalizeBABENextEpochData(finalizedHeader *types.Header) er
 	for e := range s.nextEpochData {
 		if e <= nextEpoch {
 			delete(s.nextEpochData, e)
+			// remove the epoch data from the database
+			if err = deleteDataFromDisk[types.NextEpochData](s.db, e, nextEpochDataPrefix); err != nil {
+				return fmt.Errorf("cannot delete next epoch data from the database: %w", err)
+			}
 		}
 	}
 
 	return nil
 }
 
+// deleteDataFromDisk is a generic function that deletes all the nextEpochData or nextConfigData
+// for a given epoch from the database
+func deleteDataFromDisk[T types.NextEpochData | types.NextConfigDataV1](
+	db database.Table, epoch uint64, prefix []byte) error {
+	keysToDelete, err := getDataKeysFromDisk[T](db, prefix, epoch)
+	if err != nil {
+		return fmt.Errorf("cannot get next config data keys from disk: %w", err)
+	}
+	batch := db.NewBatch()
+	for _, key := range keysToDelete {
+		err = batch.Del([]byte(key))
+		if err != nil {
+			return fmt.Errorf("cannot delete next config data from the database: %w", err)
+		}
+	}
+
+	if err := batch.Flush(); err != nil {
+		return fmt.Errorf("cannot flush deletion batch: %w", err)
+	}
+
+	return nil
+}
+
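+// Note: the keys returned by getDataKeysFromDisk (below) have the "epoch" table
+// prefix stripped, so deleteDataFromDisk (above) deletes table-relative keys
+// through the table batch rather than fully-prefixed ones.
+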
+// getDataKeysFromDisk is a generic function that returns all the nextEpochData or nextConfigData keys
+// for a given epoch from the database
+func getDataKeysFromDisk[T types.NextEpochData | types.NextConfigDataV1](
+	db database.Table, prefix []byte, currentEpoch uint64) (
+	[]string, error) {
+
+	var dataKeys []string
+	currentEpochPrefix := fmt.Sprintf("%s%d", prefix, currentEpoch)
+
+	iter, err := db.NewPrefixIterator([]byte(currentEpochPrefix))
+	if err != nil {
+		return dataKeys, err
+	}
+
+	defer iter.Release()
+
+	for iter.First(); iter.Valid(); iter.Next() {
+		key := string(iter.Key())
+		index := strings.Index(key, epochPrefix)
+		secondPart := key[index+len(epochPrefix):]
+		dataKeys = append(dataKeys, secondPart)
+	}
+
+	return dataKeys, nil
+}
+
 // FinalizeBABENextConfigData stores the right types.NextConfigData by
 // getting the set of hashes from the received epoch and for each hash
 // check if the header is in the database then it's been finalized and
@@ -950,6 +1129,10 @@ func (s *EpochState) FinalizeBABENextConfigData(finalizedHeader *types.Header) e
 	for e := range s.nextConfigData {
 		if e <= nextEpoch {
 			delete(s.nextConfigData, e)
+			// remove the config data from the database
+			if err = deleteDataFromDisk[types.NextConfigDataV1](s.db, e, nextConfigDataPrefix); err != nil {
+				return fmt.Errorf("cannot delete next config data from the database: %w", err)
+			}
 		}
 	}
diff --git a/dot/state/epoch_test.go b/dot/state/epoch_test.go
index ac65ccd4cd..1cffed5684 100644
--- a/dot/state/epoch_test.go
+++ b/dot/state/epoch_test.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/internal/database"
 	"github.com/ChainSafe/gossamer/lib/common"
 	"github.com/ChainSafe/gossamer/lib/crypto/sr25519"
 	"github.com/ChainSafe/gossamer/lib/keystore"
@@ -18,7 +19,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func newEpochStateFromGenesis(t *testing.T) *EpochState {
+func newTestEpochStateFromGenesis(t *testing.T) *EpochState {
 	db := NewInMemoryDB(t)
 	blockState := newTestBlockState(t, newTriesEmpty())
 	s, err := NewEpochStateFromGenesis(db, blockState, config.BABEConfigurationTestDefault)
@@ -27,11 +28,11 @@ func newEpochStateFromGenesis(t *testing.T) *EpochState {
 }
 
 func TestNewEpochStateFromGenesis(t *testing.T) {
-	_ = newEpochStateFromGenesis(t)
+	_ = newTestEpochStateFromGenesis(t)
 }
 
 func TestEpochState_CurrentEpoch(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 	epoch, err := s.GetCurrentEpoch()
 	require.NoError(t, err)
 	require.Equal(t, uint64(0), epoch)
@@ -44,7 +45,7 @@ func TestEpochState_CurrentEpoch(t *testing.T) {
 }
 
 func TestEpochState_EpochData(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 
 	keyring, err := keystore.NewSr25519Keyring()
 	require.NoError(t, err)
@@ -71,7 +72,7 @@ func TestEpochState_EpochData(t *testing.T) {
 }
 
 func TestEpochState_GetStartSlotForEpoch(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 
 	// let's say first slot is 1 second after January 1, 1970 UTC
 	startAtTime := time.Unix(1, 0)
@@ -111,7 +112,7 @@ func TestEpochState_GetStartSlotForEpoch(t *testing.T) {
 }
 
 func TestEpochState_ConfigData(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 
 	data := &types.ConfigData{
 		C1: 1,
@@ -153,7 +154,7 @@ func createAndImportBlockOne(t *testing.T, slotNumber uint64, blockState *BlockS
 }
 
 func TestEpochState_GetEpochForBlock(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 
 	firstSlot := uint64(1)
 	blockOneHeader := createAndImportBlockOne(t, firstSlot, s.blockState)
@@ -210,7 +211,7 @@ func TestEpochState_GetEpochForBlock(t *testing.T) {
 }
 
 func TestEpochState_SetAndGetSlotDuration(t *testing.T) {
-	s := newEpochStateFromGenesis(t)
+	s := newTestEpochStateFromGenesis(t)
 
 	expected := time.Millisecond * time.Duration(config.BABEConfigurationTestDefault.SlotDuration)
 	ret, err := s.GetSlotDuration()
@@ -377,7 +378,7 @@ func TestStoreAndFinalizeBabeNextEpochData(t *testing.T) {
 
 	for testName, tt := range tests {
 		t.Run(testName, func(t *testing.T) {
-			epochState := newEpochStateFromGenesis(t)
+			epochState := newTestEpochStateFromGenesis(t)
 
 			for _, e := range tt.inMemoryEpoch {
 				for i, hash := range e.hashes {
@@ -560,7 +561,7 @@ func TestStoreAndFinalizeBabeNextConfigData(t *testing.T) {
 
 	for testName, tt := range tests {
 		t.Run(testName, func(t *testing.T) {
-			epochState := newEpochStateFromGenesis(t)
+			epochState := newTestEpochStateFromGenesis(t)
 
 			for _, finalized := range finalizedHeaders {
 				// mapping number #1 to the block hash
@@ -641,7 +642,7 @@ func TestRetrieveChainFirstSlot(t *testing.T) {
 	// epoch calculation, same for blocks on X
 	// when finalisation happens Gossamer should retrieve the chain first
 	// slot for the finalized chain, given that the other chain will be pruned
-	singleEpochState := newEpochStateFromGenesis(t)
+	singleEpochState := newTestEpochStateFromGenesis(t)
 
 	// calling without any block it must return error
 	_, err := singleEpochState.retrieveFirstNonOriginBlockSlot(common.Hash{})
@@ -733,7 +734,7 @@ func TestRetrieveChainFirstSlot(t *testing.T) {
 }
 
 func TestRetrieveAndUpdate(t *testing.T) {
-	epochState := newEpochStateFromGenesis(t)
+	epochState := newTestEpochStateFromGenesis(t)
 	blockState := epochState.blockState
 
 	nem := nextEpochMap[types.NextEpochData]{}
@@ -823,7 +824,7 @@ func TestRetrieveAndUpdate(t *testing.T) {
 
 func TestFirstSlotNumberFromDb(t *testing.T) {
 	// test case to check whether we have the correct first slot number in the database
-	epochState := newEpochStateFromGenesis(t)
+	epochState := newTestEpochStateFromGenesis(t)
 
 	slotDuration, err := epochState.GetSlotDuration()
 	require.NoError(t, err)
@@ -855,3 +856,300 @@ func TestFirstSlotNumberFromDb(t *testing.T) {
 	require.EqualValuesf(t, predefinedSlotNumber, firstSlotNumber,
 		"expected: %d, got: %d", predefinedSlotNumber, firstSlotNumber)
 }
+
+func TestNextEpochDataAndConfigInDisk(t *testing.T) {
+	epochState := newTestEpochStateFromGenesis(t)
+	db := NewInMemoryDB(t)
+	dbTable := database.NewTable(db, epochPrefix)
+	epochState.db = dbTable
+	slotDuration, err := epochState.GetSlotDuration()
+	require.NoError(t, err)
+
+	genesisHash := epochState.blockState.genesisHash
+	// setting a predefined slot number
+	predefinedSlotNumber := uint64(1000)
+	buf := make([]byte, 8)
+	binary.LittleEndian.PutUint64(buf, predefinedSlotNumber)
+	err = epochState.blockState.db.Put(firstSlotNumberKey, buf)
+	require.NoError(t, err)
+
+	slotNumber := currentSlot(uint64(time.Now().UnixNano()),
+		uint64(slotDuration.Nanoseconds()))
+
+	firstNonOrirginBlock := types.NewEmptyHeader()
+	firstNonOrirginBlock.ParentHash = genesisHash
+	firstNonOrirginBlock.Number = 1
+	firstNonOrirginBlock.Digest = buildBlockPrimaryDigest(t,
+		types.BabePrimaryPreDigest{AuthorityIndex: 0, SlotNumber: slotNumber})
+
+	err = epochState.blockState.AddBlock(
+		&types.Block{Header: *firstNonOrirginBlock, Body: *types.NewBody([]types.Extrinsic{})})
+	require.NoError(t, err)
+
+	digest := types.NewDigest()
+	preRuntimeDigest := types.PreRuntimeDigest{
+		ConsensusEngineID: types.BabeEngineID,
+		// bytes for PreRuntimeDigest that was created in setupHeaderFile function
+		Data: []byte{1, 60, 0, 0, 0, 150, 89, 189, 15, 0, 0, 0, 0, 112, 237, 173, 28, 144, 100, 255,
+			247, 140, 177, 132, 53, 34, 61, 138, 218, 245, 234, 4, 194, 75, 26, 135, 102, 227, 220, 1, 235, 3, 204,
+			106, 12, 17, 183, 151, 147, 212, 227, 28, 192, 153, 8, 56, 34, 156, 68, 254, 209, 102, 154, 124, 124,
+			121, 225, 230, 208, 169, 99, 116, 214, 73, 103, 40, 6, 157, 30, 247, 57, 226, 144, 73, 122, 14, 59, 114,
+			143, 168, 143, 203, 221, 58, 85, 4, 224, 239, 222, 2, 66, 231, 168, 6, 221, 79, 169, 38, 12},
+	}
+
+	preRuntimeDigestItem := types.NewDigestItem()
+	err = preRuntimeDigestItem.SetValue(preRuntimeDigest)
+	require.NoError(t, err)
+	preRuntimeDigestItemValue, err := preRuntimeDigestItem.Value()
+	require.NoError(t, err)
+	digest.Add(preRuntimeDigestItemValue)
+
+	sealDigest := types.SealDigest{
+		ConsensusEngineID: types.BabeEngineID,
+		// bytes for SealDigest that was created in setupHeaderFile function
+		Data: []byte{158, 127, 40, 221, 220, 242, 124, 30, 107, 50, 141, 86, 148, 195, 104, 213, 178, 236, 93, 190,
+			14, 65, 42, 225, 201, 143, 136, 213, 59, 228, 216, 80, 47, 172, 87, 31, 63, 25, 201, 202, 175, 40, 26,
+			103, 51, 25, 36, 30, 12, 80, 149, 166, 131, 173, 52, 49, 98, 4, 8, 138, 54, 164, 189, 134},
+	}
+
+	sealDigestItem := types.NewDigestItem()
+	err = sealDigestItem.SetValue(sealDigest)
+	require.NoError(t, err)
+
+	sealDigestItemValue, err := sealDigestItem.Value()
+	require.NoError(t, err)
+	digest.Add(sealDigestItemValue)
+
+	expectedHeader := &types.Header{
+		ParentHash:     common.MustHexToHash("0x3b45c9c22dcece75a30acc9c2968cb311e6b0557350f83b430f47559db786975"),
+		Number:         1482002,
common.MustHexToHash("0x09f9ca28df0560c2291aa16b56e15e07d1e1927088f51356d522722aa90ca7cb"), + ExtrinsicsRoot: common.MustHexToHash("0xda26dc8c1455f8f81cae12e4fc59e23ce961b2c837f6d3f664283af906d344e0"), + Digest: digest, + } + + keyring, _ := keystore.NewSr25519Keyring() + + keyPairs := []*sr25519.Keypair{ + keyring.KeyAlice, keyring.KeyBob, keyring.KeyCharlie, + } + + authorities := make([]types.AuthorityRaw, len(keyPairs)) + for i, keyPair := range keyPairs { + authorities[i] = types.AuthorityRaw{ + Key: keyPair.Public().(*sr25519.PublicKey).AsBytes(), + } + } + + genericNextEpochDigest := createBABEConsensusDigest(t, types.NextEpochData{ + Authorities: authorities, + Randomness: [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}, + }) + + versionedNextConfigData := types.NewVersionedNextConfigData() + versionedNextConfigData.SetValue(types.NextConfigDataV1{ + C1: 9, + C2: 10, + SecondarySlots: 1, + }) + genericNextConfigDataDigest := createBABEConsensusDigest(t, versionedNextConfigData) + + consensusDigests := []types.ConsensusDigest{ + genericNextEpochDigest, genericNextConfigDataDigest, + } + + nextEpochData := types.NewBabeConsensusDigest() + err = scale.Unmarshal(consensusDigests[0].Data, &nextEpochData) + if err != nil { + t.Errorf("error unmarshalling next epoch data: %s", err) + } + + // Handle config and epoch data digests + err = epochState.HandleBABEDigest(expectedHeader, nextEpochData) + + require.NoError(t, err) + + nextConfigData := types.NewBabeConsensusDigest() + err = scale.Unmarshal(consensusDigests[1].Data, &nextConfigData) + if err != nil { + t.Errorf("error unmarshalling next config data: %s", err) + } + + err = epochState.HandleBABEDigest(expectedHeader, nextConfigData) + require.NoError(t, err) + + // Making sure that we have available storeSkipToEpoch prop on disk + epochState.baseState.db = db + err = epochState.baseState.storeSkipToEpoch(0) + + require.NoError(t, err) + + // Check if the next epoch data and config data are stored in the database + epochState, err = NewEpochState(db, epochState.blockState, config.BABEConfigurationTestDefault) + require.NoError(t, err) + require.Equal(t, len(epochState.nextEpochData), 1) + require.Equal(t, len(epochState.nextConfigData), 1) + +} + +func createBABEConsensusDigest(t *testing.T, digestData any) types.ConsensusDigest { + t.Helper() + + babeConsensusDigest := types.NewBabeConsensusDigest() + require.NoError(t, babeConsensusDigest.SetValue(digestData)) + + marshaledData, err := scale.Marshal(babeConsensusDigest) + require.NoError(t, err) + + return types.ConsensusDigest{ + ConsensusEngineID: types.BabeEngineID, + Data: marshaledData, + } +} + +func TestDeleteNextEpochDataAndConfig(t *testing.T) { + epochState := newTestEpochStateFromGenesis(t) + db := NewInMemoryDB(t) + // defining the db in the right context + dbTable := database.NewTable(db, epochPrefix) + epochState.db = dbTable + + genesisHash := epochState.blockState.genesisHash + // setting a predefined slot number + predefinedSlotNumber := uint64(5) + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, predefinedSlotNumber) + err := epochState.blockState.db.Put(firstSlotNumberKey, buf) + require.NoError(t, err) + + slotNumber := 0 + + firstNonOrirginBlock := types.NewEmptyHeader() + firstNonOrirginBlock.ParentHash = genesisHash + firstNonOrirginBlock.Number = 1 + firstNonOrirginBlock.Digest = buildBlockPrimaryDigest(t, + types.BabePrimaryPreDigest{AuthorityIndex: 0, SlotNumber: uint64(slotNumber)}) + + err = epochState.blockState.AddBlock( + &types.Block{Header: 
+		&types.Block{Header: *firstNonOrirginBlock, Body: *types.NewBody([]types.Extrinsic{})})
+	require.NoError(t, err)
+
+	babeHeader := types.NewBabeDigest()
+	err = babeHeader.SetValue(*types.NewBabePrimaryPreDigest(0, 5, [32]byte{}, [64]byte{}))
+	require.NoError(t, err)
+
+	enc, err := scale.Marshal(babeHeader)
+	require.NoError(t, err)
+	digest := types.NewDigest()
+	preRuntimeDigest := types.PreRuntimeDigest{
+		ConsensusEngineID: types.BabeEngineID,
+		Data:              enc,
+	}
+
+	preRuntimeDigestItem := types.NewDigestItem()
+	err = preRuntimeDigestItem.SetValue(preRuntimeDigest)
+	require.NoError(t, err)
+	preRuntimeDigestItemValue, err := preRuntimeDigestItem.Value()
+	require.NoError(t, err)
+	digest.Add(preRuntimeDigestItemValue)
+
+	sealDigest := types.SealDigest{
+		ConsensusEngineID: types.BabeEngineID,
+		Data: []byte{158, 127, 40, 221, 220, 242, 124, 30, 107, 50, 141, 86, 148, 195, 104, 213, 178, 236, 93, 190,
+			14, 65, 42, 225, 201, 143, 136, 213, 59, 228, 216, 80, 47, 172, 87, 31, 63, 25, 201, 202, 175, 40, 26,
+			103, 51, 25, 36, 30, 12, 80, 149, 166, 131, 173, 52, 49, 98, 4, 8, 138, 54, 164, 189, 134},
+	}
+
+	sealDigestItem := types.NewDigestItem()
+	err = sealDigestItem.SetValue(sealDigest)
+	require.NoError(t, err)
+
+	sealDigestItemValue, err := sealDigestItem.Value()
+	require.NoError(t, err)
+	digest.Add(sealDigestItemValue)
+
+	expectedHeader := &types.Header{
+		ParentHash:     common.MustHexToHash("0x3b45c9c22dcece75a30acc9c2968cb311e6b0557350f83b430f47559db786975"),
+		Number:         5,
+		StateRoot:      common.MustHexToHash("0x09f9ca28df0560c2291aa16b56e15e07d1e1927088f51356d522722aa90ca7cb"),
+		ExtrinsicsRoot: common.MustHexToHash("0xda26dc8c1455f8f81cae12e4fc59e23ce961b2c837f6d3f664283af906d344e0"),
+		Digest:         digest,
+	}
+
+	keyring, _ := keystore.NewSr25519Keyring()
+
+	keyPairs := []*sr25519.Keypair{
+		keyring.KeyAlice, keyring.KeyBob, keyring.KeyCharlie,
+	}
+
+	authorities := make([]types.AuthorityRaw, len(keyPairs))
+	for i, keyPair := range keyPairs {
+		authorities[i] = types.AuthorityRaw{
+			Key: keyPair.Public().(*sr25519.PublicKey).AsBytes(),
+		}
+	}
+
+	genericNextEpochDigest := createBABEConsensusDigest(t, types.NextEpochData{
+		Authorities: authorities,
+		Randomness:  [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8},
+	})
+
+	versionedNextConfigData := types.NewVersionedNextConfigData()
+	versionedNextConfigData.SetValue(types.NextConfigDataV1{
+		C1:             9,
+		C2:             10,
+		SecondarySlots: 1,
+	})
+	genericNextConfigDataDigest := createBABEConsensusDigest(t, versionedNextConfigData)
+
+	consensusDigests := []types.ConsensusDigest{
+		genericNextEpochDigest, genericNextConfigDataDigest,
+	}
+
+	nextEpochData := types.NewBabeConsensusDigest()
+	err = scale.Unmarshal(consensusDigests[0].Data, &nextEpochData)
+	if err != nil {
+		t.Errorf("error unmarshalling next epoch data: %s", err)
+	}
+
+	// Handle config and epoch data digests
+	err = epochState.HandleBABEDigest(expectedHeader, nextEpochData)
+
+	require.NoError(t, err)
+
+	nextConfigData := types.NewBabeConsensusDigest()
+	err = scale.Unmarshal(consensusDigests[1].Data, &nextConfigData)
+	if err != nil {
+		t.Errorf("error unmarshalling next config data: %s", err)
+	}
+
+	err = epochState.HandleBABEDigest(expectedHeader, nextConfigData)
+	require.NoError(t, err)
+
+	// Making sure that we have available storeSkipToEpoch prop on disk
+	epochState.baseState.db = db
+	err = epochState.baseState.storeSkipToEpoch(0)
+	require.NoError(t, err)
+
+	// Check if the next epoch data and config data are stored in the database
+	epochState, err = NewEpochState(db, epochState.blockState, config.BABEConfigurationTestDefault)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(epochState.nextEpochData))
+	require.Equal(t, 1, len(epochState.nextConfigData))
+
+	err = epochState.blockState.SetHeader(expectedHeader)
+	require.NoError(t, err)
+
+	// Finalize the next epoch data and config data
+	err = epochState.FinalizeBABENextConfigData(expectedHeader)
+	require.NoError(t, err)
+
+	err = epochState.FinalizeBABENextEpochData(expectedHeader)
+	require.NoError(t, err)
+	// Check if the next epoch data and config data are not stored in the database
+	// after finalisation
+	epochState, err = NewEpochState(db, epochState.blockState, config.BABEConfigurationTestDefault)
+	require.NoError(t, err)
+	require.Equal(t, 0, len(epochState.nextEpochData))
+	require.Equal(t, 0, len(epochState.nextConfigData))
+}
diff --git a/internal/database/database.go b/internal/database/database.go
index b3140a4623..b827c82d2b 100644
--- a/internal/database/database.go
+++ b/internal/database/database.go
@@ -60,6 +60,7 @@ type Table interface {
 	Path() string
 	NewBatch() Batch
 	NewIterator() (Iterator, error)
+	NewPrefixIterator(prefix []byte) (Iterator, error)
 }
 
 const DefaultDatabaseDir = "db"
diff --git a/internal/database/table.go b/internal/database/table.go
index 3c6eb74793..4e03cb393e 100644
--- a/internal/database/table.go
+++ b/internal/database/table.go
@@ -59,3 +59,7 @@ func (t *table) NewBatch() Batch {
 func (t *table) NewIterator() (Iterator, error) {
 	return t.db.NewPrefixIterator(t.prefix)
 }
+
+func (t *table) NewPrefixIterator(prefix []byte) (Iterator, error) {
+	return t.db.NewPrefixIterator(append(t.prefix, prefix...))
+}
diff --git a/lib/babe/babe.go b/lib/babe/babe.go
index 0b6707c417..06ed8d25ed 100644
--- a/lib/babe/babe.go
+++ b/lib/babe/babe.go
@@ -300,11 +300,15 @@ func (b *Service) initiateAndGetEpochHandler(epoch uint64) (*epochHandler, error
 }
 
 func (b *Service) runEngine() error {
-	epoch, err := b.epochState.GetCurrentEpoch()
+	bestBlock, err := b.blockState.BestBlockHeader()
 	if err != nil {
-		return fmt.Errorf("failed to get current epoch: %s", err)
+		return fmt.Errorf("getting best block: %w", err)
 	}
 
+	epoch, err := b.epochState.GetEpochForBlock(bestBlock)
+	if err != nil {
+		return fmt.Errorf("getting epoch for best block header: %w", err)
+	}
 	for {
 		next, err := b.handleEpoch(epoch)
 		if errors.Is(err, errServicePaused) || errors.Is(err, context.Canceled) {