test(swamp): pruning FN blob sync #3464

Draft
wants to merge 1 commit into base: main
139 changes: 117 additions & 22 deletions nodebuilder/tests/prune_test.go
@@ -24,13 +24,14 @@ import (
// an archival node in a network dominated by pruned nodes.
//
// 1 BN w/ pruning, 3 FN w/ pruning, 1 FN archival

// turn on archival BN
// archival FN syncs against BN
// turn off archival BN
// turn on pruning BN
// spin up 3 pruning FNs, connect
// spin up 1 LN that syncs historic blobs
//
// Steps:
Contributor Author: Added step numbers to follow in the test code.

// 1. turn on archival BN
// 2. archival FN syncs against BN
// 3. turn off archival BN
// 4. turn on pruning BN
// 5. spin up 3 pruning FNs, connect
// 6. spin up 1 LN that syncs historic blobs
func TestArchivalBlobSync(t *testing.T) {
const (
blocks = 50
@@ -44,18 +45,26 @@ func TestArchivalBlobSync(t *testing.T) {
sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

// step 1.
archivalBN := sw.NewBridgeNode()
sw.SetBootstrapper(t, archivalBN)

err := archivalBN.Start(ctx)
require.NoError(t, err)

// step 2.
archivalFN := sw.NewFullNode()
err = archivalFN.Start(ctx)
require.NoError(t, err)

require.NoError(t, <-fillDn)

// step 3.
// stop the archival BN to force LN to have to discover
// the archival FN later
err = archivalBN.Stop(ctx)
require.NoError(t, err)

pruningCfg := nodebuilder.DefaultConfig(node.Bridge)
pruningCfg.Pruner.EnableService = true

@@ -64,11 +73,7 @@ func TestArchivalBlobSync(t *testing.T) {
fx.Replace(testAvailWindow),
)

// stop the archival BN to force LN to have to discover
Contributor Author: Moved this higher where it should be.

// the archival FN later
err = archivalBN.Stop(ctx)
require.NoError(t, err)

// step 4.
pruningBN := sw.NewNodeWithConfig(node.Bridge, pruningCfg, prunerOpts)
sw.SetBootstrapper(t, pruningBN)
err = pruningBN.Start(ctx)
@@ -77,10 +82,12 @@ func TestArchivalBlobSync(t *testing.T) {
err = archivalFN.Host.Connect(ctx, *host.InfoFromHost(pruningBN.Host))
require.NoError(t, err)

// step 5.
const numFNs = 3
Contributor Author: Extracted as a const.

pruningCfg.DASer = das.DefaultConfig(node.Full)
pruningCfg.Pruner.EnableService = true
pruningFulls := make([]*nodebuilder.Node, 0, 3)
for i := 0; i < 3; i++ {
pruningFulls := make([]*nodebuilder.Node, 0)
for i := 0; i < numFNs; i++ {
pruningFN := sw.NewNodeWithConfig(node.Full, pruningCfg, prunerOpts)
err = pruningFN.Start(ctx)
require.NoError(t, err)
@@ -94,14 +101,14 @@ func TestArchivalBlobSync(t *testing.T) {
root share.DataHash
}

const wantBlobs = 10
archivalBlobs := make([]*archivalBlob, 0)
i := 1
for {

for i := 1; len(archivalBlobs) < wantBlobs; i++ {
Contributor Author: We just need to collect wantBlobs here; the i increment can be done in the loop declaration.
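To illustrate the loop change, here is a small standalone Go sketch (not part of this diff; the names are made up for illustration): in a three-clause for loop, continue still runs the post statement, so the skip branch needs no manual increment, and the length check in the condition replaces the explicit break.

package main

import "fmt"

func main() {
	const want = 3
	collected := make([]int, 0, want)

	// The loop condition doubles as the exit check (no explicit break needed),
	// and continue jumps to the i++ post statement before the condition is
	// re-evaluated (no manual increment in the skip branch).
	for i := 1; len(collected) < want; i++ {
		if i%2 == 0 {
			continue // skip even values; i++ still runs
		}
		collected = append(collected, i)
	}

	fmt.Println(collected) // prints [1 3 5]
}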

eh, err := archivalFN.HeaderServ.GetByHeight(ctx, uint64(i))
require.NoError(t, err)

if bytes.Equal(eh.DataHash, share.EmptyRoot().Hash()) {
i++
continue
}

@@ -118,11 +125,6 @@ func TestArchivalBlobSync(t *testing.T) {
height: uint64(i),
root: eh.DAH.Hash(),
})

if len(archivalBlobs) > 10 {
break
}
i++
}

// ensure pruned FNs don't have the blocks associated
@@ -135,6 +137,7 @@ func TestArchivalBlobSync(t *testing.T) {
}
}

// step 6.
ln := sw.NewLightNode(prunerOpts)
err = ln.Start(ctx)
require.NoError(t, err)
@@ -150,3 +153,95 @@ func TestArchivalBlobSync(t *testing.T) {
assert.Equal(t, b.blob.Data, got.Data)
}
}

// Pruning FN blob sync from only an archival FN.
//
// Steps:
Contributor Author: Not sure whether there should be more steps to verify all logic paths 🤷
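If more verification is wanted, one low-risk option would be an extra assertion appended after the existing LN checks at the end of this test. The sketch below is only a suggestion: it assumes the test's local variables (ctx, ln, archivalBlobs) defined further down, and uses only API calls already present in this diff.

// Sketch only: assumes ctx, ln and archivalBlobs from the test body below.
// Asserts that the headers the LN synced carry the same DA root recorded
// from the pruning FN for each collected blob's height.
for _, b := range archivalBlobs {
	eh, err := ln.HeaderServ.GetByHeight(ctx, b.height)
	require.NoError(t, err)
	assert.True(t, bytes.Equal(b.root, eh.DAH.Hash()))
}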

// 1. turn on archival FN
// 2. pruning FN syncs against archival FN
// 3. spin up 1 LN that syncs historic blobs
func TestPruningFNBlobSync(t *testing.T) {
const (
blocks = 50
btime = 300 * time.Millisecond
bsize = 16
)

ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout)
t.Cleanup(cancel)

sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime))
fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks)

// step 1.
archivalBN := sw.NewBridgeNode()
sw.SetBootstrapper(t, archivalBN)

err := archivalBN.Start(ctx)
require.NoError(t, err)

// step 2.
pruningCfg := nodebuilder.DefaultConfig(node.Full)
pruningCfg.Pruner.EnableService = true

testAvailWindow := pruner.AvailabilityWindow(time.Millisecond)
prunerOpts := fx.Options(
fx.Replace(testAvailWindow),
)

pruningFN := sw.NewNodeWithConfig(node.Full, pruningCfg, prunerOpts)
sw.SetBootstrapper(t, pruningFN)

err = pruningFN.Start(ctx)
require.NoError(t, err)

require.NoError(t, <-fillDn)

type archivalBlob struct {
Contributor Author: Probably this can be named as a separate step.

blob *blob.Blob
height uint64
root share.DataHash
}

const wantBlobs = 10
archivalBlobs := make([]*archivalBlob, 0)

for i := 1; len(archivalBlobs) < wantBlobs; i++ {
eh, err := pruningFN.HeaderServ.GetByHeight(ctx, uint64(i))
require.NoError(t, err)

if bytes.Equal(eh.DataHash, share.EmptyRoot().Hash()) {
continue
}

shr, err := pruningFN.ShareServ.GetShare(ctx, eh, 2, 2)
require.NoError(t, err)
ns, err := share.NamespaceFromBytes(shr[:share.NamespaceSize])
require.NoError(t, err)

blobs, err := pruningFN.BlobServ.GetAll(ctx, uint64(i), []share.Namespace{ns})
require.NoError(t, err)

archivalBlobs = append(archivalBlobs, &archivalBlob{
blob: blobs[0],
height: uint64(i),
root: eh.DAH.Hash(),
})
}

// step 3.
ln := sw.NewLightNode(prunerOpts)
err = ln.Start(ctx)
require.NoError(t, err)

// ensure LN can retrieve all blobs from the pruning FN
for _, b := range archivalBlobs {
_, err := ln.HeaderServ.WaitForHeight(ctx, b.height)
require.NoError(t, err)

got, err := ln.BlobServ.Get(ctx, b.height, b.blob.Namespace(), b.blob.Commitment)
require.NoError(t, err)
assert.Equal(t, b.blob.Commitment, got.Commitment)
assert.Equal(t, b.blob.Data, got.Data)
}
}