From 78daae93125bb03df0e6bc75f5d9cfda381487cd Mon Sep 17 00:00:00 2001
From: axaysagathiya
Date: Tue, 20 Aug 2024 11:16:49 +0530
Subject: [PATCH 1/4] ensure we can't second multiple candidates per relay parent

---
 dot/parachain/backing/integration_test.go   | 259 +++++++++++++++-----
 dot/parachain/overseer/mockable_overseer.go |   3 +
 2 files changed, 196 insertions(+), 66 deletions(-)

diff --git a/dot/parachain/backing/integration_test.go b/dot/parachain/backing/integration_test.go
index 7274d49097..3717f8089f 100644
--- a/dot/parachain/backing/integration_test.go
+++ b/dot/parachain/backing/integration_test.go
@@ -207,6 +207,48 @@ func signingContext(t *testing.T) parachaintypes.SigningContext {
 	}
 }
 
+// this is a helper function to create an expected action for the ValidateFromExhaustive message
+// that will return a valid result
+func validResponseForValidateFromExhaustive(
+	headData parachaintypes.HeadData,
+	pvd parachaintypes.PersistedValidationData,
+) func(msg any) bool {
+	return func(msg any) bool {
+		msgValidate, ok := msg.(candidatevalidation.ValidateFromExhaustive)
+		if !ok {
+			return false
+		}
+
+		msgValidate.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{
+			Data: candidatevalidation.ValidationResult{
+				ValidResult: &candidatevalidation.ValidValidationResult{
+					CandidateCommitments: parachaintypes.CandidateCommitments{
+						HeadData:                  headData,
+						UpwardMessages:            []parachaintypes.UpwardMessage{},
+						HorizontalMessages:        []parachaintypes.OutboundHrmpMessage{},
+						NewValidationCode:         nil,
+						ProcessedDownwardMessages: 0,
+						HrmpWatermark:             0,
+					},
+					PersistedValidationData: pvd,
+				},
+			},
+		}
+		return true
+	}
+}
+
+// this is an expected action for the StoreAvailableData message that will return a nil error
+func storeAvailableData(msg any) bool {
+	store, ok := msg.(availabilitystore.StoreAvailableData)
+	if !ok {
+		return false
+	}
+
+	store.Sender <- nil
+	return true
+}
+
 // we can second a valid candidate when the previous candidate has been found invalid
 func TestSecondsValidCandidate(t *testing.T) {
 	candidateBacking, overseer := initBackingAndOverseerMock(t)
@@ -344,38 +386,7 @@ func TestSecondsValidCandidate(t *testing.T) {
 	mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})). 
Return(&validationCode2, nil) - validate2 := func(msg any) bool { - validateFromExhaustive, ok := msg.(candidatevalidation.ValidateFromExhaustive) - if !ok { - return false - } - - validateFromExhaustive.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{ - Data: candidatevalidation.ValidationResult{ - ValidResult: &candidatevalidation.ValidValidationResult{ - CandidateCommitments: parachaintypes.CandidateCommitments{ - UpwardMessages: []parachaintypes.UpwardMessage{}, - HorizontalMessages: []parachaintypes.OutboundHrmpMessage{}, - NewValidationCode: nil, - HeadData: candidate2.Commitments.HeadData, - ProcessedDownwardMessages: 0, - HrmpWatermark: 0, - }, - PersistedValidationData: pvd2, - }, - }, - } - return true - } - - storeAvailableData := func(msg any) bool { - store, ok := msg.(availabilitystore.StoreAvailableData) - if !ok { - return false - } - store.Sender <- nil - return true - } + validate2 := validResponseForValidateFromExhaustive(candidate2.Commitments.HeadData, pvd2) distribute := func(msg any) bool { // we have seconded a candidate and shared the statement to peers @@ -519,47 +530,20 @@ func TestCandidateReachesQuorum(t *testing.T) { return true } - validate1 := func(msg any) bool { - msgValidate, ok := msg.(candidatevalidation.ValidateFromExhaustive) - if !ok { - return false - } - - msgValidate.Ch <- parachaintypes.OverseerFuncRes[candidatevalidation.ValidationResult]{ - Data: candidatevalidation.ValidationResult{ - ValidResult: &candidatevalidation.ValidValidationResult{ - CandidateCommitments: parachaintypes.CandidateCommitments{ - HeadData: headData, - UpwardMessages: []parachaintypes.UpwardMessage{}, - HorizontalMessages: []parachaintypes.OutboundHrmpMessage{}, - NewValidationCode: nil, - ProcessedDownwardMessages: 0, - HrmpWatermark: 0, - }, - PersistedValidationData: pvd, - }, - }, - } - return true - } - - storeData := func(msg any) bool { - store, ok := msg.(availabilitystore.StoreAvailableData) - if !ok { - return false - } - - store.Sender <- nil - return true - } + validate := validResponseForValidateFromExhaustive(headData, pvd) distribute := func(msg any) bool { _, ok := msg.(parachaintypes.StatementDistributionMessageShare) return ok } + provisionerMessageProvisionableData := func(msg any) bool { + _, ok := msg.(parachaintypes.ProvisionerMessageProvisionableData) + return ok + } + // set expected actions for overseer messages we send from the subsystem. - overseer.ExpectActions(fetchPov, validate1, storeData, distribute) + overseer.ExpectActions(fetchPov, validate, storeAvailableData, distribute, provisionerMessageProvisionableData) // receive statement message from overseer to candidate backing subsystem containing seconded statement overseer.ReceiveMessage(backing.StatementMessage{ @@ -779,3 +763,146 @@ func TestValidationFailDoesNotStopSubsystem(t *testing.T) { require.Len(t, backableCandidates, 0) } + +// It's impossible to second multiple candidates per relay parent without prospective parachains. 
+func TestCanNotSecondMultipleCandidatesPerRelayParent(t *testing.T) {
+	candidateBacking, overseer := initBackingAndOverseerMock(t)
+	defer overseer.Stop()
+
+	paraValidators := parachainValidators(t, candidateBacking.Keystore)
+	numOfValidators := uint(len(paraValidators))
+	relayParent := getDummyHash(t, 5)
+	paraID := uint32(1)
+
+	ctrl := gomock.NewController(t)
+	mockBlockState := backing.NewMockBlockState(ctrl)
+	mockRuntime := backing.NewMockInstance(ctrl)
+	mockImplicitView := backing.NewMockImplicitView(ctrl)
+
+	candidateBacking.BlockState = mockBlockState
+	candidateBacking.ImplicitView = mockImplicitView
+
+	// mock BlockState methods
+	mockBlockState.EXPECT().GetRuntime(gomock.AssignableToTypeOf(common.Hash{})).
+		Return(mockRuntime, nil).Times(4)
+
+	// mock Runtime Instance methods
+	mockRuntime.EXPECT().ParachainHostAsyncBackingParams().
+		Return(nil, wazero_runtime.ErrExportFunctionNotFound)
+	mockRuntime.EXPECT().ParachainHostSessionIndexForChild().
+		Return(parachaintypes.SessionIndex(1), nil).Times(3)
+	mockRuntime.EXPECT().ParachainHostValidators().
+		Return(paraValidators, nil)
+	mockRuntime.EXPECT().ParachainHostValidatorGroups().
+		Return(validatorGroups(t), nil)
+	mockRuntime.EXPECT().ParachainHostAvailabilityCores().
+		Return(availabilityCores(t), nil)
+	mockRuntime.EXPECT().ParachainHostMinimumBackingVotes().
+		Return(backing.LEGACY_MIN_BACKING_VOTES, nil)
+	mockRuntime.EXPECT().
+		ParachainHostSessionExecutorParams(gomock.AssignableToTypeOf(parachaintypes.SessionIndex(0))).
+		Return(nil, wazero_runtime.ErrExportFunctionNotFound).Times(2)
+
+	// mock ImplicitView
+	mockImplicitView.EXPECT().AllAllowedRelayParents().
+		Return([]common.Hash{})
+
+	// to make an entry in the perRelayParent map
+	overseer.ReceiveMessage(parachaintypes.ActiveLeavesUpdateSignal{
+		Activated: &parachaintypes.ActivatedLeaf{Hash: relayParent, Number: 1},
+	})
+
+	time.Sleep(1 * time.Second)
+
+	headData := parachaintypes.HeadData{Data: []byte{4, 5, 6}}
+
+	pov := parachaintypes.PoV{BlockData: []byte{1, 2, 3}}
+	povHash, err := pov.Hash()
+	require.NoError(t, err)
+
+	pvd := dummyPVD(t)
+	pvdHash, err := pvd.Hash()
+	require.NoError(t, err)
+
+	validationCode1 := parachaintypes.ValidationCode{1, 2, 3}
+
+	candidate1 := newCommittedCandidate(t,
+		paraID,
+		headData,
+		povHash,
+		relayParent,
+		makeErasureRoot(t, numOfValidators, pov, pvd),
+		pvdHash,
+		validationCode1,
+	)
+
+	validate := validResponseForValidateFromExhaustive(headData, pvd)
+
+	distribute := func(msg any) bool {
+		// we have seconded a candidate and shared the statement to peers
+		share, ok := msg.(parachaintypes.StatementDistributionMessageShare)
+		if !ok {
+			return false
+		}
+
+		statement, err := share.SignedFullStatementWithPVD.SignedFullStatement.Payload.Value()
+		require.NoError(t, err)
+
+		require.Equal(t, statement, parachaintypes.Seconded(candidate1))
+		require.Equal(t, *share.SignedFullStatementWithPVD.PersistedValidationData, pvd)
+		require.Equal(t, share.RelayParent, relayParent)
+
+		return true
+	}
+
+	informSeconded := func(msg any) bool {
+		// informed collator protocol that we have seconded the candidate
+		_, ok := msg.(collatorprotocolmessages.Seconded)
+		return ok
+	}
+
+	overseer.ExpectActions(validate, storeAvailableData, distribute, informSeconded)
+
+	// mocked for candidate1
+	mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})). 
+		Return(&validationCode1, nil)
+
+	overseer.ReceiveMessage(backing.SecondMessage{
+		RelayParent:             relayParent,
+		CandidateReceipt:        candidate1.ToPlain(),
+		PersistedValidationData: pvd,
+		PoV:                     pov,
+	})
+
+	time.Sleep(1 * time.Second)
+
+	validationCode2 := parachaintypes.ValidationCode{4, 5, 6}
+
+	candidate2 := newCommittedCandidate(t,
+		paraID,
+		headData,
+		povHash,
+		relayParent,
+		makeErasureRoot(t, numOfValidators, pov, pvd),
+		pvdHash,
+		validationCode2,
+	)
+
+	// Validate the candidate, but reject it because the leaf is already occupied.
+	// We should not expect the `StatementDistributionMessageShare` and `collatorprotocolmessages.Seconded` overseer messages.
+	overseer.ExpectActions(validate, storeAvailableData)
+
+	// mocked for candidate2
+	mockRuntime.EXPECT().ParachainHostValidationCodeByHash(gomock.AssignableToTypeOf(common.Hash{})).
+		Return(&validationCode2, nil)
+
+	// Try to second a candidate with the same relay parent again.
+	overseer.ReceiveMessage(backing.SecondMessage{
+		RelayParent:             relayParent,
+		CandidateReceipt:        candidate2.ToPlain(),
+		PersistedValidationData: pvd,
+		PoV:                     pov,
+	})
+
+	time.Sleep(2 * time.Second)
+}
diff --git a/dot/parachain/overseer/mockable_overseer.go b/dot/parachain/overseer/mockable_overseer.go
index c64fe75eff..5afe72f59e 100644
--- a/dot/parachain/overseer/mockable_overseer.go
+++ b/dot/parachain/overseer/mockable_overseer.go
@@ -96,6 +96,9 @@ func (m *MockableOverseer) processMessages() {
 				}
 
 				actionIndex = actionIndex + 1
+			} else {
+				m.t.Errorf("unexpected message: %T", msg)
+				return
 			}
 		case <-m.ctx.Done():
 			if err := m.ctx.Err(); err != nil {

From d439cc42bf4c0bf8758e31612cc41cb780180bc4 Mon Sep 17 00:00:00 2001
From: axaysagathiya
Date: Tue, 20 Aug 2024 12:12:46 +0530
Subject: [PATCH 2/4] wait after overseer stop, reduce sleep time

---
 dot/parachain/backing/integration_test.go | 24 ++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/dot/parachain/backing/integration_test.go b/dot/parachain/backing/integration_test.go
index b69c97e129..a6546f1df4 100644
--- a/dot/parachain/backing/integration_test.go
+++ b/dot/parachain/backing/integration_test.go
@@ -5,6 +5,7 @@ package backing_test
 
 import (
 	"errors"
+	"fmt"
 	"testing"
 	"time"
 
@@ -24,6 +25,12 @@ import (
 	gomock "go.uber.org/mock/gomock"
 )
 
+// Ensure the overseer stops before test completion
+func stopOverseerAndWaitForCompletion(overseer *overseer.MockableOverseer) {
+	overseer.Stop()
+	time.Sleep(100 * time.Millisecond) // Give some time for any ongoing processes to finish
+}
+
 // register the backing subsystem, run backing subsystem, start overseer
 func initBackingAndOverseerMock(t *testing.T) (*backing.CandidateBacking, *overseer.MockableOverseer) {
 	t.Helper()
@@ -252,7 +259,7 @@ func storeAvailableData(msg any) bool {
 // we can second a valid candidate when the previous candidate has been found invalid
 func TestSecondsValidCandidate(t *testing.T) {
 	candidateBacking, overseer := initBackingAndOverseerMock(t)
-	defer overseer.Stop()
+	defer stopOverseerAndWaitForCompletion(overseer)
 
 	paraValidators := parachainValidators(t, candidateBacking.Keystore)
 	numOfValidators := uint(len(paraValidators))
@@ -355,7 +362,7 @@ func TestSecondsValidCandidate(t *testing.T) {
 		PoV: pov1,
 	})
 
-	time.Sleep(3 * time.Second)
+	time.Sleep(1 * time.Second)
 
 	pov2 := parachaintypes.PoV{BlockData: []byte{45, 46, 47}}
 
@@ -408,6 +415,7 @@ func TestSecondsValidCandidate(t *testing.T) {
 	informSeconded := func(msg any) bool {
 		// informed collator protocol that we have seconded the 
candidate _, ok := msg.(collatorprotocolmessages.Seconded) + fmt.Println("==> informSeconded <==") return ok } @@ -423,14 +431,14 @@ func TestSecondsValidCandidate(t *testing.T) { PoV: pov2, }) - time.Sleep(3 * time.Second) + time.Sleep(1 * time.Second) } // candidate reaches quorum. // in legacy backing, we need 2 approvals to reach quorum. func TestCandidateReachesQuorum(t *testing.T) { candidateBacking, overseer := initBackingAndOverseerMock(t) - defer overseer.Stop() + defer stopOverseerAndWaitForCompletion(overseer) paraValidators := parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -616,15 +624,13 @@ func TestCandidateReachesQuorum(t *testing.T) { // as it is a valid statement, we do not validate the candidate, just store into the statement table. require.Len(t, backableCandidates, 1) require.Len(t, backableCandidates[0].ValidityVotes, 3) - - time.Sleep(3 * time.Second) } // if the validation of the candidate has failed this does not stop the work of this subsystem // and so it is not fatal to the node. func TestValidationFailDoesNotStopSubsystem(t *testing.T) { candidateBacking, overseer := initBackingAndOverseerMock(t) - defer overseer.Stop() + defer stopOverseerAndWaitForCompletion(overseer) paraValidators := parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -767,7 +773,7 @@ func TestValidationFailDoesNotStopSubsystem(t *testing.T) { // It's impossible to second multiple candidates per relay parent without prospective parachains. func TestCanNotSecondMultipleCandidatesPerRelayParent(t *testing.T) { candidateBacking, overseer := initBackingAndOverseerMock(t) - defer overseer.Stop() + defer stopOverseerAndWaitForCompletion(overseer) paraValidators := parachainValidators(t, candidateBacking.Keystore) numOfValidators := uint(len(paraValidators)) @@ -904,5 +910,5 @@ func TestCanNotSecondMultipleCandidatesPerRelayParent(t *testing.T) { PoV: pov, }) - time.Sleep(2 * time.Second) + time.Sleep(1 * time.Second) } From 6b0e277e48a2e5032dfed2d1b02fa042f20794a1 Mon Sep 17 00:00:00 2001 From: axaysagathiya Date: Tue, 20 Aug 2024 12:17:39 +0530 Subject: [PATCH 3/4] cleanup --- dot/parachain/backing/integration_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/dot/parachain/backing/integration_test.go b/dot/parachain/backing/integration_test.go index a6546f1df4..3c47fbb3f0 100644 --- a/dot/parachain/backing/integration_test.go +++ b/dot/parachain/backing/integration_test.go @@ -5,7 +5,6 @@ package backing_test import ( "errors" - "fmt" "testing" "time" @@ -415,7 +414,6 @@ func TestSecondsValidCandidate(t *testing.T) { informSeconded := func(msg any) bool { // informed collator protocol that we have seconded the candidate _, ok := msg.(collatorprotocolmessages.Seconded) - fmt.Println("==> informSeconded <==") return ok } From 3579452c8a52a97b0c3173317b49116caae2a9bb Mon Sep 17 00:00:00 2001 From: axaysagathiya Date: Thu, 22 Aug 2024 19:35:01 +0530 Subject: [PATCH 4/4] fix collator protocol tests --- .../collator-protocol/message_test.go | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/dot/parachain/collator-protocol/message_test.go b/dot/parachain/collator-protocol/message_test.go index e00c8a8122..9dd209a0bd 100644 --- a/dot/parachain/collator-protocol/message_test.go +++ b/dot/parachain/collator-protocol/message_test.go @@ -19,7 +19,6 @@ import ( "github.com/ChainSafe/gossamer/dot/network" collatorprotocolmessages 
"github.com/ChainSafe/gossamer/dot/parachain/collator-protocol/messages" networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" - "github.com/ChainSafe/gossamer/dot/parachain/overseer" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/dot/peerset" ) @@ -375,17 +374,22 @@ func TestHandleCollationMessageDeclare(t *testing.T) { c := c t.Run(c.description, func(t *testing.T) { t.Parallel() + + subsystemToOverseer := make(chan any) cpvs := CollatorProtocolValidatorSide{ - peerData: c.peerData, - currentAssignments: c.currentAssignments, + SubSystemToOverseer: subsystemToOverseer, + peerData: c.peerData, + currentAssignments: c.currentAssignments, } - mockOverseer := overseer.NewMockableOverseer(t) - mockOverseer.RegisterSubsystem(&cpvs) - cpvs.SubSystemToOverseer = mockOverseer.GetSubsystemToOverseerChannel() - - mockOverseer.Start() - defer mockOverseer.Stop() + // ensure that the expected messages are sent to the overseer + if len(c.expectedMessages) > 0 { + go func() { + for _, expectedMessage := range c.expectedMessages { + require.Equal(t, expectedMessage, <-subsystemToOverseer) + } + }() + } msg := collatorprotocolmessages.NewCollationProtocol() vdtChild := collatorprotocolmessages.NewCollatorProtocolMessage() @@ -444,7 +448,6 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { }, errString: ErrRelayParentUnknown.Error(), }, - { description: "fail with unknown peer if peer is not tracked in our list of active collators", advertiseCollation: collatorprotocolmessages.AdvertiseCollation(testRelayParent), @@ -574,19 +577,21 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { t.Run(c.description, func(t *testing.T) { t.Parallel() + subsystemToOverseer := make(chan any) cpvs := CollatorProtocolValidatorSide{ - net: c.net, - perRelayParent: c.perRelayParent, - peerData: c.peerData, - activeLeaves: c.activeLeaves, + SubSystemToOverseer: subsystemToOverseer, + net: c.net, + perRelayParent: c.perRelayParent, + peerData: c.peerData, + activeLeaves: c.activeLeaves, } - mockOverseer := overseer.NewMockableOverseer(t) - mockOverseer.RegisterSubsystem(&cpvs) - cpvs.SubSystemToOverseer = mockOverseer.GetSubsystemToOverseerChannel() - - mockOverseer.Start() - defer mockOverseer.Stop() + // ensure that the expected messages are sent to the overseer + if c.expectedMessage != nil { + go func() { + require.Equal(t, c.expectedMessage, <-subsystemToOverseer) + }() + } msg := collatorprotocolmessages.NewCollationProtocol() vdtChild := collatorprotocolmessages.NewCollatorProtocolMessage() @@ -604,7 +609,6 @@ func TestHandleCollationMessageAdvertiseCollation(t *testing.T) { } else { require.ErrorContains(t, err, c.errString) } - }) } }