From baea974bce9310c1b6b83ca0b1d717c05eab8ace Mon Sep 17 00:00:00 2001
From: Axay Sagathiya
Date: Thu, 22 Aug 2024 19:58:41 +0530
Subject: [PATCH 1/3] test(dot/parachain/backing): cannot second multiple
 candidates per relay parent without prospective parachains (#4134)

- I have written a test to ensure it is impossible to second multiple candidates per relay parent when prospective parachains mode is inactive (async backing is not supported).
- Received a candidate backing message to second a candidate and ensured the candidate was seconded.
- Received another candidate backing message to second a candidate with the same relay parent and ensured this candidate got rejected.
- Wrote some helper functions to reuse code.
- Added a function to stop the mockable overseer and wait some time for ongoing processes to finish before exiting the test.
- Added an extra check in the mockable overseer to fail the test when it receives a message with no matching expected action.
- Because of this extra check, other tests failed, so they have been fixed.
---
 dot/parachain/backing/integration_test.go    | 277 +++++++++++++-----
 .../collator-protocol/message_test.go        |  46 +--
 dot/parachain/overseer/mockable_overseer.go  |   3 +
 3 files changed, 232 insertions(+), 94 deletions(-)

diff --git a/dot/parachain/backing/integration_test.go b/dot/parachain/backing/integration_test.go
index 184b9c69e4..3c47fbb3f0 100644
--- a/dot/parachain/backing/integration_test.go
+++ b/dot/parachain/backing/integration_test.go
@@ -24,6 +24,12 @@ import (
     gomock "go.uber.org/mock/gomock"
 )

+// Ensure the overseer stops before test completion
+func stopOverseerAndWaitForCompletion(overseer *overseer.MockableOverseer) {
+    overseer.Stop()
+    time.Sleep(100 * time.Millisecond) // give ongoing processes some time to finish
+}
+
 // register the backing subsystem, run backing subsystem, start overseer
 func initBackingAndOverseerMock(t *testing.T) (*backing.CandidateBacking, *overseer.MockableOverseer) {
     t.Helper()
@@ -207,10 +213,52 @@ func signingContext(t *testing.T) parachaintypes.SigningContext {
     }
 }

+// this is a helper function to create an expected action for the ValidateFromExhaustive message
+// that will return a valid result
+func validResponseForValidateFromExhaustive(
+    headData parachaintypes.HeadData,
+    pvd parachaintypes.PersistedValidationData,
+) func(msg any) bool {
+    return func(msg any) bool {
+        msgValidate, ok := msg.(candidatevalidation.ValidateFromExhaustive)
+        if !ok {
+            return false
+        }
+
+        msgValidate.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{
+            Data: candidatevalidation.ValidationResult{
+                ValidResult: &candidatevalidation.ValidValidationResult{
+                    CandidateCommitments: parachaintypes.CandidateCommitments{
+                        HeadData: headData,
+                        UpwardMessages: []parachaintypes.UpwardMessage{},
+                        HorizontalMessages: []parachaintypes.OutboundHrmpMessage{},
+                        NewValidationCode: nil,
+                        ProcessedDownwardMessages: 0,
+                        HrmpWatermark: 0,
+                    },
+                    PersistedValidationData: pvd,
+                },
+            },
+        }
+        return true
+    }
+}
+
+// this is an expected action for the StoreAvailableData message that will return a nil error
+func storeAvailableData(msg any) bool {
+    store, ok := msg.(availabilitystore.StoreAvailableData)
+    if !ok {
+        return false
+    }
+
+    store.Sender <- nil
+    return true
+}
+
 // we can second a valid candidate when the previous candidate has been found invalid
 func TestSecondsValidCandidate(t *testing.T) {
     candidateBacking, overseer := initBackingAndOverseerMock(t)
-    defer overseer.Stop()
+    defer stopOverseerAndWaitForCompletion(overseer)
     paraValidators 
:= parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -313,7 +361,7 @@ func TestSecondsValidCandidate(t *testing.T) { PoV: pov1, }) - time.Sleep(3 * time.Second) + time.Sleep(1 * time.Second) pov2 := parachaintypes.PoV{BlockData: []byte{45, 46, 47}} @@ -344,38 +392,7 @@ func TestSecondsValidCandidate(t *testing.T) { mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})). Return(&validationCode2, nil) - validate2 := func(msg any) bool { - validateFromExhaustive, ok := msg.(candidatevalidation.ValidateFromExhaustive) - if !ok { - return false - } - - validateFromExhaustive.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{ - Data: candidatevalidation.ValidationResult{ - ValidResult: &candidatevalidation.ValidValidationResult{ - CandidateCommitments: parachaintypes.CandidateCommitments{ - UpwardMessages: []parachaintypes.UpwardMessage{}, - HorizontalMessages: []parachaintypes.OutboundHrmpMessage{}, - NewValidationCode: nil, - HeadData: candidate2.Commitments.HeadData, - ProcessedDownwardMessages: 0, - HrmpWatermark: 0, - }, - PersistedValidationData: pvd2, - }, - }, - } - return true - } - - storeAvailableData := func(msg any) bool { - store, ok := msg.(availabilitystore.StoreAvailableData) - if !ok { - return false - } - store.Sender <- nil - return true - } + validate2 := validResponseForValidateFromExhaustive(candidate2.Commitments.HeadData, pvd2) distribute := func(msg any) bool { // we have seconded a candidate and shared the statement to peers @@ -412,14 +429,14 @@ func TestSecondsValidCandidate(t *testing.T) { PoV: pov2, }) - time.Sleep(3 * time.Second) + time.Sleep(1 * time.Second) } // candidate reaches quorum. // in legacy backing, we need 2 approvals to reach quorum. func TestCandidateReachesQuorum(t *testing.T) { candidateBacking, overseer := initBackingAndOverseerMock(t) - defer overseer.Stop() + defer stopOverseerAndWaitForCompletion(overseer) paraValidators := parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -519,47 +536,20 @@ func TestCandidateReachesQuorum(t *testing.T) { return true } - validate1 := func(msg any) bool { - msgValidate, ok := msg.(candidatevalidation.ValidateFromExhaustive) - if !ok { - return false - } - - msgValidate.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{ - Data: candidatevalidation.ValidationResult{ - ValidResult: &candidatevalidation.ValidValidationResult{ - CandidateCommitments: parachaintypes.CandidateCommitments{ - HeadData: headData, - UpwardMessages: []parachaintypes.UpwardMessage{}, - HorizontalMessages: []parachaintypes.OutboundHrmpMessage{}, - NewValidationCode: nil, - ProcessedDownwardMessages: 0, - HrmpWatermark: 0, - }, - PersistedValidationData: pvd, - }, - }, - } - return true - } - - storeData := func(msg any) bool { - store, ok := msg.(availabilitystore.StoreAvailableData) - if !ok { - return false - } - - store.Sender <- nil - return true - } + validate := validResponseForValidateFromExhaustive(headData, pvd) distribute := func(msg any) bool { _, ok := msg.(parachaintypes.StatementDistributionMessageShare) return ok } + provisionerMessageProvisionableData := func(msg any) bool { + _, ok := msg.(parachaintypes.ProvisionerMessageProvisionableData) + return ok + } + // set expected actions for overseer messages we send from the subsystem. 
- overseer.ExpectActions(fetchPov, validate1, storeData, distribute) + overseer.ExpectActions(fetchPov, validate, storeAvailableData, distribute, provisionerMessageProvisionableData) // receive statement message from overseer to candidate backing subsystem containing seconded statement overseer.ReceiveMessage(backing.StatementMessage{ @@ -632,15 +622,13 @@ func TestCandidateReachesQuorum(t *testing.T) { // as it is a valid statement, we do not validate the candidate, just store into the statement table. require.Len(t, backableCandidates, 1) require.Len(t, backableCandidates[0].ValidityVotes, 3) - - time.Sleep(3 * time.Second) } // if the validation of the candidate has failed this does not stop the work of this subsystem // and so it is not fatal to the node. func TestValidationFailDoesNotStopSubsystem(t *testing.T) { candidateBacking, overseer := initBackingAndOverseerMock(t) - defer overseer.Stop() + defer stopOverseerAndWaitForCompletion(overseer) paraValidators := parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -779,3 +767,146 @@ func TestValidationFailDoesNotStopSubsystem(t *testing.T) { require.Len(t, backableCandidates, 0) } + +// It's impossible to second multiple candidates per relay parent without prospective parachains. +func TestCanNotSecondMultipleCandidatesPerRelayParent(t *testing.T) { + candidateBacking, overseer := initBackingAndOverseerMock(t) + defer stopOverseerAndWaitForCompletion(overseer) + + paraValidators := parachainValidators(t, candidateBacking.Keystore) + numOfValidators := uint(len(paraValidators)) + relayParent := getDummyHash(t, 5) + paraID := uint32(1) + + ctrl := gomock.NewController(t) + mockBlockState := backing.NewMockBlockState(ctrl) + mockRuntime := backing.NewMockInstance(ctrl) + mockImplicitView := backing.NewMockImplicitView(ctrl) + + candidateBacking.BlockState = mockBlockState + candidateBacking.ImplicitView = mockImplicitView + + // mock BlockState methods + mockBlockState.EXPECT().GetRuntime(gomock.AssignableToTypeOf(common.Hash{})). + Return(mockRuntime, nil).Times(4) + + // mock Runtime Instance methods + mockRuntime.EXPECT().ParachainHostAsyncBackingParams(). + Return(nil, wazero_runtime.ErrExportFunctionNotFound) + mockRuntime.EXPECT().ParachainHostSessionIndexForChild(). + Return(parachaintypes.SessionIndex(1), nil).Times(3) + mockRuntime.EXPECT().ParachainHostValidators(). + Return(paraValidators, nil) + mockRuntime.EXPECT().ParachainHostValidatorGroups(). + Return(validatorGroups(t), nil) + mockRuntime.EXPECT().ParachainHostAvailabilityCores(). + Return(availabilityCores(t), nil) + mockRuntime.EXPECT().ParachainHostMinimumBackingVotes(). + Return(backing.LEGACY_MIN_BACKING_VOTES, nil) + mockRuntime.EXPECT(). + ParachainHostSessionExecutorParams(gomock.AssignableToTypeOf(parachaintypes.SessionIndex(0))). + Return(nil, wazero_runtime.ErrExportFunctionNotFound).Times(2) + + //mock ImplicitView + mockImplicitView.EXPECT().AllAllowedRelayParents(). 
+        Return([]common.Hash{})
+
+    // to make an entry in the perRelayParent map
+    overseer.ReceiveMessage(parachaintypes.ActiveLeavesUpdateSignal{
+        Activated: &parachaintypes.ActivatedLeaf{Hash: relayParent, Number: 1},
+    })
+
+    time.Sleep(1 * time.Second)
+
+    headData := parachaintypes.HeadData{Data: []byte{4, 5, 6}}
+
+    pov := parachaintypes.PoV{BlockData: []byte{1, 2, 3}}
+    povHash, err := pov.Hash()
+    require.NoError(t, err)
+
+    pvd := dummyPVD(t)
+    pvdHash, err := pvd.Hash()
+    require.NoError(t, err)
+
+    validationCode1 := parachaintypes.ValidationCode{1, 2, 3}
+
+    candidate1 := newCommittedCandidate(t,
+        paraID,
+        headData,
+        povHash,
+        relayParent,
+        makeErasureRoot(t, numOfValidators, pov, pvd),
+        pvdHash,
+        validationCode1,
+    )
+
+    validate := validResponseForValidateFromExhaustive(headData, pvd)
+
+    distribute := func(msg any) bool {
+        // we have seconded a candidate and shared the statement with peers
+        share, ok := msg.(parachaintypes.StatementDistributionMessageShare)
+        if !ok {
+            return false
+        }
+
+        statement, err := share.SignedFullStatementWithPVD.SignedFullStatement.Payload.Value()
+        require.NoError(t, err)
+
+        require.Equal(t, statement, parachaintypes.Seconded(candidate1))
+        require.Equal(t, *share.SignedFullStatementWithPVD.PersistedValidationData, pvd)
+        require.Equal(t, share.RelayParent, relayParent)
+
+        return true
+    }
+
+    informSeconded := func(msg any) bool {
+        // informed the collator protocol that we have seconded the candidate
+        _, ok := msg.(collatorprotocolmessages.Seconded)
+        return ok
+    }
+
+    overseer.ExpectActions(validate, storeAvailableData, distribute, informSeconded)
+
+    // mocked for candidate1
+    mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})).
+        Return(&validationCode1, nil)
+
+    overseer.ReceiveMessage(backing.SecondMessage{
+        RelayParent: relayParent,
+        CandidateReceipt: candidate1.ToPlain(),
+        PersistedValidationData: pvd,
+        PoV: pov,
+    })
+
+    time.Sleep(1 * time.Second)
+
+    validationCode2 := parachaintypes.ValidationCode{4, 5, 6}
+
+    candidate2 := newCommittedCandidate(t,
+        paraID,
+        headData,
+        povHash,
+        relayParent,
+        makeErasureRoot(t, numOfValidators, pov, pvd),
+        pvdHash,
+        validationCode2,
+    )
+
+    // The candidate gets validated, but it is rejected because the leaf is already occupied,
+    // so we should not expect the `StatementDistributionMessageShare` and
+    // `collatorprotocolmessages.Seconded` overseer messages.
+    overseer.ExpectActions(validate, storeAvailableData)
+
+    // mocked for candidate2
+    mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})).
+        Return(&validationCode2, nil)
+
+    // Try to second a candidate with the same relay parent again. 
+ overseer.ReceiveMessage(backing.SecondMessage{ + RelayParent: relayParent, + CandidateReceipt: candidate2.ToPlain(), + PersistedValidationData: pvd, + PoV: pov, + }) + + time.Sleep(1 * time.Second) +} diff --git a/dot/parachain/collator-protocol/message_test.go b/dot/parachain/collator-protocol/message_test.go index e00c8a8122..9dd209a0bd 100644 --- a/dot/parachain/collator-protocol/message_test.go +++ b/dot/parachain/collator-protocol/message_test.go @@ -19,7 +19,6 @@ import ( "github.com/ChainSafe/gossamer/dot/network" collatorprotocolmessages "github.com/ChainSafe/gossamer/dot/parachain/collator-protocol/messages" networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" - "github.com/ChainSafe/gossamer/dot/parachain/overseer" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/dot/peerset" ) @@ -375,17 +374,22 @@ func TestHandleCollationMessageDeclare(t *testing.T) { c := c t.Run(c.description, func(t *testing.T) { t.Parallel() + + subsystemToOverseer := make(chan any) cpvs := CollatorProtocolValidatorSide{ - peerData: c.peerData, - currentAssignments: c.currentAssignments, + SubSystemToOverseer: subsystemToOverseer, + peerData: c.peerData, + currentAssignments: c.currentAssignments, } - mockOverseer := overseer.NewMockableOverseer(t) - mockOverseer.RegisterSubsystem(&cpvs) - cpvs.SubSystemToOverseer = mockOverseer.GetSubsystemToOverseerChannel() - - mockOverseer.Start() - defer mockOverseer.Stop() + // ensure that the expected messages are sent to the overseer + if len(c.expectedMessages) > 0 { + go func() { + for _, expectedMessage := range c.expectedMessages { + require.Equal(t, expectedMessage, <-subsystemToOverseer) + } + }() + } msg := collatorprotocolmessages.NewCollationProtocol() vdtChild := collatorprotocolmessages.NewCollatorProtocolMessage() @@ -444,7 +448,6 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { }, errString: ErrRelayParentUnknown.Error(), }, - { description: "fail with unknown peer if peer is not tracked in our list of active collators", advertiseCollation: collatorprotocolmessages.AdvertiseCollation(testRelayParent), @@ -574,19 +577,21 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { t.Run(c.description, func(t *testing.T) { t.Parallel() + subsystemToOverseer := make(chan any) cpvs := CollatorProtocolValidatorSide{ - net: c.net, - perRelayParent: c.perRelayParent, - peerData: c.peerData, - activeLeaves: c.activeLeaves, + SubSystemToOverseer: subsystemToOverseer, + net: c.net, + perRelayParent: c.perRelayParent, + peerData: c.peerData, + activeLeaves: c.activeLeaves, } - mockOverseer := overseer.NewMockableOverseer(t) - mockOverseer.RegisterSubsystem(&cpvs) - cpvs.SubSystemToOverseer = mockOverseer.GetSubsystemToOverseerChannel() - - mockOverseer.Start() - defer mockOverseer.Stop() + // ensure that the expected messages are sent to the overseer + if c.expectedMessage != nil { + go func() { + require.Equal(t, c.expectedMessage, <-subsystemToOverseer) + }() + } msg := collatorprotocolmessages.NewCollationProtocol() vdtChild := collatorprotocolmessages.NewCollatorProtocolMessage() @@ -604,7 +609,6 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { } else { require.ErrorContains(t, err, c.errString) } - }) } } diff --git a/dot/parachain/overseer/mockable_overseer.go b/dot/parachain/overseer/mockable_overseer.go index a9a55c2ef5..529444a02e 100644 --- a/dot/parachain/overseer/mockable_overseer.go +++ 
b/dot/parachain/overseer/mockable_overseer.go
@@ -96,6 +96,9 @@ func (m *MockableOverseer) processMessages() {
                 }

                 actionIndex = actionIndex + 1
+            } else {
+                m.t.Errorf("unexpected message: %T", msg)
+                return
             }
         case <-m.ctx.Done():
             if err := m.ctx.Err(); err != nil {

From cd5a2bfea9dc97ceec4e02aaf96bc4ef313e1974 Mon Sep 17 00:00:00 2001
From: Axay Sagathiya
Date: Thu, 29 Aug 2024 12:36:16 +0530
Subject: [PATCH 2/3] test(dot/parachain/backing): ensure the new leaf view
 doesn't clobber the old view (#4148)

---
 dot/parachain/backing/integration_test.go   | 124 ++++++++++++++++++++
 dot/parachain/overseer/mockable_overseer.go |   4 +
 2 files changed, 128 insertions(+)

diff --git a/dot/parachain/backing/integration_test.go b/dot/parachain/backing/integration_test.go
index 3c47fbb3f0..41c2f0cfc5 100644
--- a/dot/parachain/backing/integration_test.go
+++ b/dot/parachain/backing/integration_test.go
@@ -910,3 +910,127 @@ func TestCanNotSecondMultipleCandidatesPerRelayParent(t *testing.T) {

     time.Sleep(1 * time.Second)
 }
+
+// The new leaf view doesn't clobber the old view when we update active leaves.
+func TestNewLeafDoesNotClobberOld(t *testing.T) {
+    candidateBacking, overseer := initBackingAndOverseerMock(t)
+    defer stopOverseerAndWaitForCompletion(overseer)
+
+    paraValidators := parachainValidators(t, candidateBacking.Keystore)
+    numOfValidators := uint(len(paraValidators))
+    relayParent1 := getDummyHash(t, 5)
+    relayParent2 := getDummyHash(t, 6)
+    paraID := uint32(1)
+    validationCode := parachaintypes.ValidationCode{1, 2, 3}
+
+    ctrl := gomock.NewController(t)
+    mockBlockState := backing.NewMockBlockState(ctrl)
+    mockRuntime := backing.NewMockInstance(ctrl)
+    mockImplicitView := backing.NewMockImplicitView(ctrl)
+
+    candidateBacking.BlockState = mockBlockState
+    candidateBacking.ImplicitView = mockImplicitView
+
+    // mock BlockState methods
+    mockBlockState.EXPECT().GetRuntime(gomock.AssignableToTypeOf(common.Hash{})).
+        Return(mockRuntime, nil).Times(5)
+
+    // mock Runtime Instance methods
+    mockRuntime.EXPECT().ParachainHostAsyncBackingParams().
+        Return(nil, wazero_runtime.ErrExportFunctionNotFound).Times(2)
+    mockRuntime.EXPECT().ParachainHostSessionIndexForChild().
+        Return(parachaintypes.SessionIndex(1), nil).Times(3)
+    mockRuntime.EXPECT().ParachainHostValidators().
+        Return(paraValidators, nil).Times(2)
+    mockRuntime.EXPECT().ParachainHostValidatorGroups().
+        Return(validatorGroups(t), nil).Times(2)
+    mockRuntime.EXPECT().ParachainHostAvailabilityCores().
+        Return(availabilityCores(t), nil).Times(2)
+    mockRuntime.EXPECT().ParachainHostMinimumBackingVotes().
+        Return(backing.LEGACY_MIN_BACKING_VOTES, nil).Times(2)
+    mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})).
+        Return(&validationCode, nil)
+    mockRuntime.EXPECT().
+        ParachainHostSessionExecutorParams(gomock.AssignableToTypeOf(parachaintypes.SessionIndex(0))).
+        Return(nil, wazero_runtime.ErrExportFunctionNotFound).Times(1)
+
+    // mock ImplicitView
+    mockImplicitView.EXPECT().AllAllowedRelayParents(). 
+        Return([]common.Hash{}).Times(2)
+
+    // add relay parent 1 to active leaves
+    overseer.ReceiveMessage(parachaintypes.ActiveLeavesUpdateSignal{
+        Activated: &parachaintypes.ActivatedLeaf{Hash: relayParent1, Number: 1},
+    })
+    time.Sleep(500 * time.Millisecond)
+
+    // add relay parent 2 to active leaves; this should not clobber relay parent 1
+    // and should still allow seconding candidates for relay parent 1
+    overseer.ReceiveMessage(parachaintypes.ActiveLeavesUpdateSignal{
+        Activated: &parachaintypes.ActivatedLeaf{Hash: relayParent2, Number: 1},
+    })
+    time.Sleep(500 * time.Millisecond)
+
+    headData := parachaintypes.HeadData{Data: []byte{4, 5, 6}}
+
+    pov := parachaintypes.PoV{BlockData: []byte{1, 2, 3}}
+    povHash, err := pov.Hash()
+    require.NoError(t, err)
+
+    pvd := dummyPVD(t)
+    pvdHash, err := pvd.Hash()
+    require.NoError(t, err)
+
+    // candidate with relay parent 1
+    candidate := newCommittedCandidate(t,
+        paraID,
+        headData,
+        povHash,
+        relayParent1,
+        makeErasureRoot(t, numOfValidators, pov, pvd),
+        pvdHash,
+        validationCode,
+    )
+
+    validate := validResponseForValidateFromExhaustive(headData, pvd)
+
+    distribute := func(msg any) bool {
+        // we have seconded a candidate and shared the statement with peers
+        share, ok := msg.(parachaintypes.StatementDistributionMessageShare)
+        if !ok {
+            return false
+        }
+
+        statement, err := share.SignedFullStatementWithPVD.SignedFullStatement.Payload.Value()
+        require.NoError(t, err)
+
+        require.Equal(t, statement, parachaintypes.Seconded(candidate))
+        require.Equal(t, *share.SignedFullStatementWithPVD.PersistedValidationData, pvd)
+        require.Equal(t, share.RelayParent, relayParent1)
+
+        return true
+    }
+
+    informSeconded := func(msg any) bool {
+        // informed the collator protocol that we have seconded the candidate
+        _, ok := msg.(collatorprotocolmessages.Seconded)
+        return ok
+    }
+
+    // If the old leaf view were clobbered, the candidate would be ignored. In that case the
+    // overseer would not receive the `StatementDistributionMessageShare` and
+    // `collatorprotocolmessages.Seconded` messages, and the test would fail.
+    //
+    // But when the old leaf view is not clobbered, the candidate is seconded,
+    // so the overseer expects all four overseer messages. 
+ overseer.ExpectActions(validate, storeAvailableData, distribute, informSeconded) + + overseer.ReceiveMessage(backing.SecondMessage{ + RelayParent: relayParent1, + CandidateReceipt: candidate.ToPlain(), + PersistedValidationData: pvd, + PoV: pov, + }) + + time.Sleep(1 * time.Second) +} diff --git a/dot/parachain/overseer/mockable_overseer.go b/dot/parachain/overseer/mockable_overseer.go index 529444a02e..85f74780af 100644 --- a/dot/parachain/overseer/mockable_overseer.go +++ b/dot/parachain/overseer/mockable_overseer.go @@ -101,6 +101,10 @@ func (m *MockableOverseer) processMessages() { return } case <-m.ctx.Done(): + if actionIndex < len(m.actionsForExpectedMessages) { + m.t.Errorf("expected %d overseer actions, but got only %d", len(m.actionsForExpectedMessages), actionIndex) + } + if err := m.ctx.Err(); err != nil { m.t.Logf("ctx error: %v\n", err) } From 5ef77a84302147775049900e7f089bb9e737ba37 Mon Sep 17 00:00:00 2001 From: Kishan Mohanbhai Sagathiya Date: Thu, 5 Sep 2024 16:22:03 +0530 Subject: [PATCH 3/3] resolved some comments --- dot/parachain/network-bridge/receiver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dot/parachain/network-bridge/receiver.go b/dot/parachain/network-bridge/receiver.go index a5f97517b2..07aae3e5a5 100644 --- a/dot/parachain/network-bridge/receiver.go +++ b/dot/parachain/network-bridge/receiver.go @@ -232,8 +232,9 @@ func (nbr *NetworkBridgeReceiver) handleNetworkEvents(event network.NetworkEvent switch event.Event { case network.Connected: nbr.SubsystemsToOverseer <- events.PeerConnected{ - PeerID: event.PeerID, - // TODO: Add remaining fields + PeerID: event.PeerID, + OverservedRole: event.Role, + // TODO: Add protocol versions when we have them } case network.Disconnected: nbr.SubsystemsToOverseer <- events.PeerDisconnected{ @@ -270,7 +271,6 @@ func (nbr *NetworkBridgeReceiver) ProcessActiveLeavesUpdateSignal( } sort.Sort(SortableActivatedLeaves(newLiveHeads)) - // TODO: do I need to store these live heads or just pass them to update view? nbr.liveHeads = newLiveHeads if !majorSyncing {