From 9ed6753f27e2c363eb030296c1803bb01e2dfad9 Mon Sep 17 00:00:00 2001 From: Anton Ilin <48175203+Anton-Rampage@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:45:18 +0300 Subject: [PATCH] Add light node mode (#1270) * Added types * Renamed a file * Changed asset- and waves balances structures * Commented a function * Changed addresses types * Changed balanceAsset * Added a constructor * Added decimals to asset snapshot * Changed uint8 to int8 * Added a field for asset reissuability * Changed the type of total quantity * removed the pointer * Changed the type of orderID * Make 'SnapshotManager' as an interface type. * Replace 'proto.assetID' to 'crypto.Digest'. * Replace 'proto.Address' to 'proto.WavesAddress'. * Changed leaseID type * changed types of leaseIn and leaseOut * Removed the pointer types from AliasSnapshot * Change a bit 'AtomicSnapshot' interface. * Update snapshot types. * Removed unnecessary fields in 'AccountScriptSnapshot' struct. * Remove unnecessary types. * Change 'StaticAssetInfoSnapshot.Issuer' field to 'StaticAssetInfoSnapshot.IssuerPublicKey'. * Rename 'AssetReissuabilitySnapshot' to 'AssetVolumeSnapshot'. * Add feature block reward distribution (#1082) * add feature BlockRewardDistribution * fix tests * Improvement of the test on reward distribution to check that remainder of division goes to miner. * refactor after review * fix feature info and add reward addresses in testnet and stagenet configs * DAO and buyback addresses added for MainNet. Formatting of settings files fixed. --------- Co-authored-by: Alexey Kiselev Co-authored-by: Nikolay Eskov * change actions count with feature BlockRewardDistribution activated (#1088) * change count actions with feature BlockRewardDistribution activated * fix comment in script * Merge fix. 
* Fix version in comment * Eth transaction refactoring and tests --------- Co-authored-by: Alexey Kiselev Co-authored-by: Nikolay Eskov * Ride add rewards to block info (#1096) * add feature BlockRewardDistribution * fix tests * Improvement of the test on reward distribution to check that remainder of division goes to miner. * refactor after review * fix feature info and add reward addresses in testnet and stagenet configs * change count actions with feature BlockRewardDistribution activated * Ride version 7 added. Extended version of BlockInfo ride object added. Ride types representation in objects stdlib description changed to simple string. Parsing of types added to code generation and updated in compiler. Code generation updated to support Tuple types. * Fixed code generation for RideV7. Fixed compilation of V7 scripts. Added rewards structure and function to SmartState to get the block rewards. Added test on script accessing new rewards field on BlockInfo. * Fixed FunctionCall usage in tests * Fixed sorting of rewards with stable sort. * Script activation check added for RideV7. * Code improvements * Restored recursive check of list types in generated code. * Generation of simplified code for tuple type checks. * Duplicated tests removed * Fixed lib version check in Ride compiler --------- Co-authored-by: Anton Ilin Co-authored-by: Anton Ilin <48175203+Anton-Rampage@users.noreply.github.com> * Fix clear-text logging of sensitive information. (#1128) * Bump golang.org/x/sync from 0.2.0 to 0.3.0 (#1129) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.2.0 to 0.3.0. - [Commits](https://github.com/golang/sync/compare/v0.2.0...v0.3.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alexey Kiselev * Added 'LeaseStateStatus'. 
Changed types in 'LeaseStateSnapshot'. * Rename 'LeaseStateStatus.Status' to 'LeaseStateStatus.Value'. * Add methods to 'SnapshotManager' interface. Changed 'AtomicSnapshot' interface. * Snapshot applier (#1145) * Create marshal/unmarshal methods for 'leasing' structure in 'pkg/state'. * WIP: rollbackable map. * Return 'balanceProfile' and 'wavesBalanceRecord' by value. * Started 'snapshotApplier.' * Add apply methods for assets snapshot applying. * Add 'snapshotApplier.applySponsorship' method. * Add 'snapshotApplier.applyAccountScript' method. * Refactor 'ordersVolumes'. * Add 'snapshotApplier.applyFilledVolumeAndFee' method. * Add 'snapshotApplier.applyDataEntry' method. * Add 'snapshotApplier.applyLeaseState' method. * Revert "WIP: rollbackable map." This reverts commit cb3496915e101cd683f8174c5e801cbe924d6bca. * Created and used 'SnapshotApplierInfo' interface in 'state' package. * Create 'SnapshotApplier' interface in 'state' package. * Refactor 'SnapshotApplier' interface. * Remove 'SnapshotApplierInfo' interface. * Extracted 'snapshotApplierStorages'. * Rename 'ApplyDataEntry' to 'ApplyDataEntries'. * Replace 'SnapshotManager' to 'SnapshotApplier'. 
* Node transaction snapshots (#1078) * Added a conversion function * Added payment transaction conversion * Added transfer tx conversion * Transfer tx conversion changed * Added issue and reissue tx conversions * Issue * Issue, reissue, burn, exchange * Handled lease transactions * Finished performers * Added snapshots for all types of transactions * Fixed types after merging * Fixed issue snapshot mistake * Added rewards snapshot in append block * Changed functions to newest * Added snapshots from actions * Removed todos * Deleted useless code * Fixed a mistake with leasing cancel * Added tests for issue and reissue transactions * Fixed tests * fixed implicit memory aliasing * Added tests for burn, lease, lease cancel, exchange and create alias transactions * Added tests for data, sponsorship, set account and asset script transactions * Added a test for the invoke transaction snapshots * Fixed after merge * Fixed a function * Moved snapshot generation * Remove 'SnapshotManager' interface. * Added a todo * Refactored performerInfo * Refactored performer info again * Moved generation to snapshot generator * Fixed some linter issues * Added an asset atomic snapshot for issue transaction * Replaced current recording to storage to snapshot applier * Replaced to snapshot applier, except balances * Fixed a comment * Removed the comment * Removed unnecessary code * Returned the linter command back * Replaced current recording to storage to snapshot applier (#1162) * Replaced current recording to storage to snapshot applier * Replaced to snapshot applier, except balances * Fixed a comment * Removed the comment * Removed unnecessary code * Returned the linter command back * Revert "Replaced current recording to storage to snapshot applier (#1162)" (#1164) This reverts commit 51904b2ff66e4de39d5668461676b9415f879f2d. 
* Added asset script atomic snapshot to the issue script actions * Fixed some linter issues * Fixed a few more linter issues * Fixed all linter issues * Fixed err shadowing * Fixed a mistake with issue counter * removed a useless line * Fixed applying order transactions * Fixed applying order transactions * Added lising snapshot to the tx diff generated group * Moved complexity saving into transaction performer * Moved complexity saving into transaction performer * removed a line * Fixed a bug with an empty sender * Used error.Is * Set an empty script for issue with sig * add light node states * add snapshot storage * fix some linter issues * move snapshots types to proto * fix linter issues * Fixed a bug with wrong action sender * Fixed a bug with wrong action sender * use vtproto * Add 'TransactionStatusSnapshot'. * Implemented 'AtomicSnapshot' interface for 'TransactionStatusSnapshot'. * Changed 'AssetScriptSnapshot' and 'scriptStorageState.setAssetScript'. Removed 'pk' arg from 'setAssetScript'. 
Removed two fields from 'AssetScriptSnapshot': - 'SenderPK' - 'Complexity' * * Commented invoke snapshots, import ok (#1165) * Replaced current recording to storage to snapshot applier * Replaced to snapshot applier, except balances * Fixed a comment * Removed the comment * Removed unnecessary code * Returned the linter command back * Added asset script atomic snapshot to the issue script actions * Fixed some linter issues * Fixed a few more linter issues * Fixed all linter issues * Fixed err shadowing * Fixed a mistake with issue counter * removed a useless line * Fixed applying order transactions * Fixed applying order transactions * Added lising snapshot to the tx diff generated group * Moved complexity saving into transaction performer * Moved complexity saving into transaction performer * removed a line * Fixed a bug with an empty sender * Used error.Is * Set an empty script for issue with sig * move snapshots types to proto * Fixed a bug with wrong action sender * Fixed a bug with wrong action sender * Add 'TransactionStatusSnapshot'. * Implemented 'AtomicSnapshot' interface for 'TransactionStatusSnapshot'. * Changed 'AssetScriptSnapshot' and 'scriptStorageState.setAssetScript'. Removed 'pk' arg from 'setAssetScript'. Removed two fields from 'AssetScriptSnapshot': - 'SenderPK' - 'Complexity' * * Commented invoke snapshots, import ok * moved setting complexity and set script to snapshot applier * Fixed a test and a mistake * Fixed renaming * Fixed a comment * Moved dapp complexity to applier * Added dapp complexity to set script as well * Fixed a check mistake * Fixed a test * removed empty script check * removed a todo * Removed useless code * Modified a test * Fixed same keys issue for data entry actions * Remove invoke snapshot (#1235) * moved complexity back * Reverted some changes, import ok * Returned account script and asset script txs, import ? 
* Uncommented internal snapshot, import ok * Fixed height problem with issue action and issue tx, import NO * Issue action reverted * Added log and issueCounter for issue action * Commented invoke snapshots, import ok --------- Co-authored-by: Nikolay Eskov --------- Co-authored-by: Anton Ilin Co-authored-by: Nikolay Eskov * Added asset script special snapshot * Added internal snapshot for script for issue tx * add tx status snapshot to proto func * remove unnecessary changes * fix test * Separated internal tx snapshots. * Reduced 'internalSnapshot' interface, removed unnecessary code. * add new add block with snapshot * add light node * fix linter issue * fix tests * fix ng state * fix some bugs * Merge fix * fix issues * fix tests * fix itests * add add block with snapshots and new states for snapshots * fix test * Add snapshot hash check for 'txAppender.appendBlock'. * Timeout tasks for wait snapshot states of FSM can be cancelled. * remove copy-pastes * Fixed bug in 'MicroBlockCache.GetSnapshot'. * Add block snapshots as an optional return parameter for 'newBlocks.current'. * move snapshot requests to defer * Refactor a bit 'newBlocks.current'. * Linter fixes. * Remove irrelevant TODO. * fix channel size for proto msgs * Fix light node fee distribution (#1341) * Fix TODO comment in 'snapshot_generator_internal_test.go'. * Use 'error.As' in 'wrapErr' func according to the linter recommendation. * Use 'errors.Is' according to the linter recommendation. * Rename 'needToRecalc' variable to 'needToRecalculate'. * Add 'default' case for 'switch' in 'txAppender.handleFallible'. * Add TODO for 'txAppender.applySnapshotInLightNode'. * Refactor 'newStateManager' function. * Reuse 'currentBlockInfo' struct in 'applySnapshotInLightNode' and 'appendTxs' methods of 'txAppender'. * Count miner fee while processing transactions snapshots in 'txAppender.applySnapshotInLightNode'. * Count miner fees after transactions processing. 
* Fix mistake with wrapping wrong error variable while calculating miner's fees. * Count miner tx fee after tx application. * Count miner tx fees in 'commitTxApplication'. * fix other channel size * fix state name * Add 'cleanupBeforeTransition' deferred calls for 'WaitMicroSnapshotState' and 'WaitSnapshotState'. * Simplified a bit 'syncSkipMessageList' for SYNC in full mode * fix microBlockSnapshot state name * initialize snapshot cache * fix linter * fix linters * Add snapshots to importer (#1290) * Added snapshots import to importer * Added flags * Added snapshots to importer * Importer refactoring. Extracted blockReader and snapshotReader. Linter errors fixed. Blockchain scheme fixed. * Linter action version updated. * Golangci-lint version updated to 1.55.2. * Importer global refactoring. * Use new min and max functions from Go 1.21. * Unused newSnapshots type removed. * Small style fix. Additional check on data size while deserializing snapshots. --------- Co-authored-by: Alexey Kiselev * Fix gosec issues. * return to ng if failed to apply block/microblock * add Score in WaitSnapshots States * fix Score check func and reduce timeouts * increase message max length and allow negative in/out in lease balance snapshot * fix asset volume decode/encode * fix linters * move ask blockIDs * fix linters * Fix log records. * Fix 'TestTxSnapshotHasher'. * Add new testcase for 'TestTxSnapshotMarshalToPBAndUnmarshalFromPB'. * Remove unnecessary closure from 'selectImporter' in 'cmd/importer/importer.go'. * Optimized a bit 'BlockSnapshot.ToProtobuf'. * Optimized a bit 'NGState.checkAndAppendMicroBlock'. * Optimized a bit 'WaitMicroSnapshotState.checkAndAppendMicroBlock'. * rewrite ApplyFromFile in importer and fix some issues * Remove 'nolint' for 'ProtocolVersion' func. * Add more logs in 'ApplyFromFile' func. * Add sanity checks for 'ImportParams'. * Properly handle error in 'processScoreAfterApplyingOrReturnToNG'. 
--------- Signed-off-by: dependabot[bot] Co-authored-by: esuwu Co-authored-by: Nikolay Eskov Co-authored-by: Alexey Kiselev Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alexandr Dolgavin <41806871+esuwu@users.noreply.github.com> --- cmd/chaincmp/chaincmp.go | 2 +- cmd/convert/main.go | 5 +- cmd/importer/importer.go | 318 +++++++++++----- cmd/node/node.go | 18 +- cmd/retransmitter/main.go | 2 +- cmd/rollback/main.go | 2 +- cmd/statehash/statehash.go | 2 +- itests/net/connection.go | 4 +- pkg/grpc/generated/waves/events/events.pb.go | 18 +- pkg/grpc/server/common_test.go | 4 +- pkg/importer/blocks_importer.go | 87 +++++ pkg/importer/blocks_reader.go | 54 +++ pkg/importer/importer.go | 142 +++---- pkg/importer/snapshots_importer.go | 108 ++++++ pkg/importer/snapshots_reader.go | 65 ++++ pkg/importer/speed_regulator.go | 53 +++ pkg/libs/microblock_cache/microblock_cache.go | 52 ++- pkg/libs/ordered_blocks/optimistic_load.go | 40 +- .../ordered_blocks/optimistic_load_test.go | 22 +- pkg/metrics/metrics.go | 9 +- pkg/mock/state.go | 58 +++ pkg/node/actions_by_type.go | 132 ++++++- pkg/node/blocks_applier/blocks_applier.go | 252 +++++++++++-- pkg/node/blocks_applier/node_mocks.go | 34 +- pkg/node/fsm/fsm.go | 60 ++- pkg/node/fsm/fsm_common.go | 72 +++- pkg/node/fsm/halt_state.go | 10 +- pkg/node/fsm/idle_state.go | 9 +- pkg/node/fsm/ng/inv_request.go | 4 +- pkg/node/fsm/ng/inv_request_test.go | 1 - pkg/node/fsm/ng_state.go | 225 +++++++---- pkg/node/fsm/persist_state.go | 8 +- pkg/node/fsm/sync_internal/internal.go | 51 ++- pkg/node/fsm/sync_internal/internal_test.go | 18 +- pkg/node/fsm/sync_state.go | 101 +++-- pkg/node/fsm/tasks/tasks.go | 76 ++++ pkg/node/fsm/wait_micro_snapshot.go | 273 ++++++++++++++ pkg/node/fsm/wait_snapshot_state.go | 209 +++++++++++ pkg/node/node.go | 5 +- pkg/node/peers/peer_manager.go | 2 +- pkg/p2p/conn/conn.go | 4 +- pkg/p2p/peer/extension/extension.go | 16 + pkg/p2p/peer/handle_test.go 
| 6 +- pkg/p2p/peer/peer.go | 20 +- pkg/proto/block_snapshot.go | 55 +++ pkg/proto/proto.go | 294 +++++++++++++-- pkg/proto/protobuf_converters.go | 32 +- pkg/proto/snapshot_types.go | 17 +- pkg/proto/snapshot_types_test.go | 4 + pkg/services/services.go | 26 +- pkg/state/address_transactions_test.go | 2 +- pkg/state/api.go | 12 +- pkg/state/appender.go | 297 +++++++++------ pkg/state/headers_validation_test.go | 5 +- pkg/state/invoke_applier_test.go | 2 +- pkg/state/snapshot_generator_internal_test.go | 2 +- pkg/state/snapshot_hasher_internal_test.go | 2 +- pkg/state/state.go | 355 +++++++++++------- pkg/state/state_test.go | 88 +++-- pkg/state/threadsafe_wrapper.go | 23 +- 60 files changed, 3111 insertions(+), 758 deletions(-) create mode 100644 pkg/importer/blocks_importer.go create mode 100644 pkg/importer/blocks_reader.go create mode 100644 pkg/importer/snapshots_importer.go create mode 100644 pkg/importer/snapshots_reader.go create mode 100644 pkg/importer/speed_regulator.go create mode 100644 pkg/node/fsm/wait_micro_snapshot.go create mode 100644 pkg/node/fsm/wait_snapshot_state.go diff --git a/cmd/chaincmp/chaincmp.go b/cmd/chaincmp/chaincmp.go index f25d0a8e8..06b748417 100644 --- a/cmd/chaincmp/chaincmp.go +++ b/cmd/chaincmp/chaincmp.go @@ -96,7 +96,7 @@ func run() error { for i, u := range other { u, err = checkAndUpdateURL(u) if err != nil { - zap.S().Error("Incorrect reference's URL: %s", err.Error()) + zap.S().Errorf("Incorrect reference's URL: %s", err.Error()) return errInvalidParameters } other[i] = u diff --git a/cmd/convert/main.go b/cmd/convert/main.go index 98473ddd8..d575998f9 100644 --- a/cmd/convert/main.go +++ b/cmd/convert/main.go @@ -5,8 +5,9 @@ import ( "flag" "os" - "github.com/wavesplatform/gowaves/pkg/proto" "go.uber.org/zap" + + "github.com/wavesplatform/gowaves/pkg/proto" ) var ( @@ -28,7 +29,7 @@ func main() { zap.S().Fatal("please, provide scheme-byte argument") } if len(*schemeByte) != 1 { - zap.S().Fatal("invalid scheme-byte 
argument %q", *schemeByte) + zap.S().Fatalf("invalid scheme-byte argument %q", *schemeByte) } scheme := []byte(*schemeByte)[0] switch *command { diff --git a/cmd/importer/importer.go b/cmd/importer/importer.go index 9c5ad1342..9255bc53e 100644 --- a/cmd/importer/importer.go +++ b/cmd/importer/importer.go @@ -8,11 +8,13 @@ import ( "io" "os" "os/signal" + "path/filepath" "runtime" "runtime/debug" "runtime/pprof" "strings" "time" + "unicode" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -30,156 +32,268 @@ const ( ) func main() { + err := run() + if err != nil { + zap.S().Error(capitalize(err.Error())) + os.Exit(1) + } +} + +type cfg struct { + logLevel *zapcore.Level + cfgPath string + blockchainType string + blockchainPath string + balancesPath string + dataDirPath string + nBlocks int + verificationGoroutinesNum int + writeBufferSize int + buildDataForExtendedAPI bool + buildStateHashes bool + lightNodeMode bool + snapshotsPath string + cpuProfilePath string + memProfilePath string +} + +func parseFlags() (cfg, error) { const ( defaultBlocksNumber = 1000 defaultBufferSize = 16 ) - var ( - logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, - "Logging level. Supported levels: DEBUG, INFO, WARN, ERROR, FATAL. Default logging level INFO.") - cfgPath = flag.String("cfg-path", "", - "Path to blockchain settings JSON file for custom blockchains. Not set by default.") - blockchainType = flag.String("blockchain-type", "mainnet", - "Blockchain type. Allowed values: mainnet/testnet/stagenet/custom. 
Default is 'mainnet'.") - blockchainPath = flag.String("blockchain-path", "", "Path to binary blockchain file.") - balancesPath = flag.String("balances-path", "", - "Path to JSON with correct balances after applying blocks.") - dataDirPath = flag.String("data-path", "", "Path to directory with previously created state.") - nBlocks = flag.Int("blocks-number", defaultBlocksNumber, "Number of blocks to import.") - verificationGoroutinesNum = flag.Int("verification-goroutines-num", runtime.NumCPU()*2, - " Number of goroutines that will be run for verification of transactions/blocks signatures.") - writeBufferSize = flag.Int("write-buffer", defaultBufferSize, "Write buffer size in MiB.") - buildDataForExtendedAPI = flag.Bool("build-extended-api", false, - "Build and store additional data required for extended API in state. "+ - "WARNING: this slows down the import, use only if you do really need extended API.") - buildStateHashes = flag.Bool("build-state-hashes", false, - "Calculate and store state hashes for each block height.") - // Debug. - cpuProfilePath = flag.String("cpuprofile", "", "Write cpu profile to this file.") - memProfilePath = flag.String("memprofile", "", "Write memory profile to this file.") - ) - + c := cfg{} + c.logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, + "Logging level. Supported levels: DEBUG, INFO, WARN, ERROR, FATAL. Default logging level INFO.") + flag.StringVar(&c.cfgPath, "cfg-path", "", + "Path to blockchain settings JSON file for custom blockchains. Not set by default.") + flag.StringVar(&c.blockchainType, "blockchain-type", "mainnet", + "Blockchain type. Allowed values: mainnet/testnet/stagenet/custom. 
Default is 'mainnet'.") + flag.StringVar(&c.blockchainPath, "blockchain-path", "", "Path to binary blockchain file.") + flag.StringVar(&c.balancesPath, "balances-path", "", + "Path to JSON with correct balances after applying blocks.") + flag.StringVar(&c.dataDirPath, "data-path", "", "Path to directory with previously created state.") + flag.IntVar(&c.nBlocks, "blocks-number", defaultBlocksNumber, "Number of blocks to import.") + flag.IntVar(&c.verificationGoroutinesNum, "verification-goroutines-num", runtime.NumCPU()*2, + " Number of goroutines that will be run for verification of transactions/blocks signatures.") + flag.IntVar(&c.writeBufferSize, "write-buffer", defaultBufferSize, "Write buffer size in MiB.") + flag.BoolVar(&c.buildDataForExtendedAPI, "build-extended-api", false, + "Build and store additional data required for extended API in state. "+ + "WARNING: this slows down the import, use only if you do really need extended API.") + flag.BoolVar(&c.buildStateHashes, "build-state-hashes", false, + "Calculate and store state hashes for each block height.") + flag.BoolVar(&c.lightNodeMode, "light-node", false, + "Run the node in the light mode in which snapshots are imported without validation") + flag.StringVar(&c.snapshotsPath, "snapshots-path", "", "Path to binary snapshots file.") + // Debug. 
+ flag.StringVar(&c.cpuProfilePath, "cpuprofile", "", "Write cpu profile to this file.") + flag.StringVar(&c.memProfilePath, "memprofile", "", "Write memory profile to this file.") flag.Parse() - logger := logging.SetupSimpleLogger(*logLevel) - defer func() { - err := logger.Sync() - if err != nil && errors.Is(err, os.ErrInvalid) { - panic(fmt.Sprintf("Failed to close logging subsystem: %v\n", err)) + if c.blockchainPath == "" { + return cfg{}, errors.New("option blockchain-path is not specified, please specify it") + } + if c.dataDirPath == "" { + return cfg{}, errors.New("option data-path is not specified, please specify it") + } + if c.lightNodeMode && c.snapshotsPath == "" { + return cfg{}, errors.New("option snapshots-path is not specified in light mode, please specify it") + } + + return c, nil +} + +func (c *cfg) params(maxFDs int) state.StateParams { + const clearance = 10 + params := state.DefaultStateParams() + params.StorageParams.DbParams.OpenFilesCacheCapacity = maxFDs - clearance + params.VerificationGoroutinesNum = c.verificationGoroutinesNum + params.DbParams.WriteBuffer = c.writeBufferSize * MiB + params.StoreExtendedApiData = c.buildDataForExtendedAPI + params.BuildStateHashes = c.buildStateHashes + params.ProvideExtendedApi = false // We do not need to provide any APIs during import. 
+ return params +} + +func (c *cfg) setupLogger() func() { + logger := logging.SetupSimpleLogger(*c.logLevel) + return func() { + if sErr := logger.Sync(); sErr != nil && errors.Is(sErr, os.ErrInvalid) { + zap.S().Errorf("Failed to close logging subsystem: %v", sErr) } - }() - zap.S().Infof("Gowaves Importer version: %s", versioning.Version) + } +} - maxFDs, err := fdlimit.MaxFDs() - if err != nil { - zap.S().Fatalf("Initialization error: %v", err) +func (c *cfg) setupCPUProfile() (func(), error) { + if c.cpuProfilePath == "" { + return func() {}, nil } - _, err = fdlimit.RaiseMaxFDs(maxFDs) + f, err := os.Create(c.cpuProfilePath) if err != nil { - zap.S().Fatalf("Initialization error: %v", err) + return nil, fmt.Errorf("failed to create CPU profile: %w", err) + } + if err = pprof.StartCPUProfile(f); err != nil { + return nil, fmt.Errorf("failed to start CPU profile: %w", err) } + return func() { + pprof.StopCPUProfile() + if clErr := f.Close(); clErr != nil { + zap.S().Errorf("Failed to close CPU profile: %v", clErr) + } + }, nil +} - if *blockchainPath == "" { - zap.S().Fatalf("You must specify blockchain-path option.") +func run() error { + c, err := parseFlags() + if err != nil { + return err } - if *dataDirPath == "" { - zap.S().Fatalf("You must specify data-path option.") + + logSync := c.setupLogger() + defer logSync() + + zap.S().Infof("Gowaves Importer version: %s", versioning.Version) + + fds, err := riseFDLimit() + if err != nil { + return err } // Debug. 
- if *cpuProfilePath != "" { - f, err := os.Create(*cpuProfilePath) - if err != nil { - zap.S().Fatal("Could not create CPU profile: ", err) - } - defer func() { _ = f.Close() }() - if err := pprof.StartCPUProfile(f); err != nil { - zap.S().Fatal("Could not start CPU profile: ", err) - } - defer pprof.StopCPUProfile() + cpfClose, err := c.setupCPUProfile() + if err != nil { + return err } + defer cpfClose() // https://godoc.org/github.com/coocood/freecache#NewCache debug.SetGCPercent(20) - var ss *settings.BlockchainSettings - if strings.ToLower(*blockchainType) == "custom" && *cfgPath != "" { - f, err := os.Open(*cfgPath) - if err != nil { - zap.S().Fatalf("Failed to open custom blockchain settings: %v", err) - } - defer func() { _ = f.Close() }() - ss, err = settings.ReadBlockchainSettings(f) - if err != nil { - zap.S().Fatalf("Failed to read custom blockchain settings: %v", err) - } - } else { - ss, err = settings.BlockchainSettingsByTypeName(*blockchainType) - if err != nil { - zap.S().Fatalf("Failed to load blockchain settings: %v", err) - } - } - params := state.DefaultStateParams() - params.StorageParams.DbParams.OpenFilesCacheCapacity = int(maxFDs - 10) - params.VerificationGoroutinesNum = *verificationGoroutinesNum - params.DbParams.WriteBuffer = *writeBufferSize * MiB - params.StoreExtendedApiData = *buildDataForExtendedAPI - params.BuildStateHashes = *buildStateHashes - // We do not need to provide any APIs during import. 
- params.ProvideExtendedApi = false - - st, err := state.NewState(*dataDirPath, false, params, ss) + ss, err := configureBlockchainSettings(c.blockchainType, c.cfgPath) if err != nil { - zap.S().Fatalf("Failed to create state: %v", err) + return err } + st, err := state.NewState(c.dataDirPath, false, c.params(fds), ss, false) + if err != nil { + return fmt.Errorf("failed to create state: %w", err) + } defer func() { - if err := st.Close(); err != nil { - zap.S().Fatalf("Failed to close State: %v", err) + if clErr := st.Close(); clErr != nil { + zap.S().Errorf("Failed to close State: %v", clErr) } }() height, err := st.Height() if err != nil { - zap.S().Fatalf("Failed to get current height: %v", err) + return fmt.Errorf("failed to get current height: %w", err) } + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() + params := importer.ImportParams{ + Schema: ss.AddressSchemeCharacter, + BlockchainPath: c.blockchainPath, + SnapshotsPath: c.snapshotsPath, + LightNodeMode: c.lightNodeMode, + } + start := time.Now() - applyErr := importer.ApplyFromFile(ctx, st, *blockchainPath, uint64(*nBlocks), height) - if applyErr != nil { + if impErr := importer.ApplyFromFile(ctx, params, st, uint64(c.nBlocks), height); impErr != nil { currentHeight, hErr := st.Height() if hErr != nil { zap.S().Fatalf("Failed to get current height: %v", hErr) } - switch { - case errors.Is(applyErr, context.Canceled): - zap.S().Infof("Interrupted by user, height %d", currentHeight) - case errors.Is(applyErr, io.EOF): - zap.S().Info("End of blockchain file reached, height %d", currentHeight) - default: - zap.S().Fatalf("Failed to apply blocks after height %d: %v", currentHeight, applyErr) - } + handleError(impErr, currentHeight) } elapsed := time.Since(start) zap.S().Infof("Import took %s", elapsed) - if len(*balancesPath) != 0 { - if err := importer.CheckBalances(st, *balancesPath); err != nil { - zap.S().Fatalf("Balances check failed: %v", err) + + if 
len(c.balancesPath) != 0 { + if balErr := importer.CheckBalances(st, c.balancesPath); balErr != nil { + return fmt.Errorf("failed to check balances: %w", balErr) } } // Debug. - if *memProfilePath != "" { - f, err := os.Create(*memProfilePath) + if mpfErr := configureMemProfile(c.memProfilePath); mpfErr != nil { + return mpfErr + } + + return nil +} + +func handleError(err error, height uint64) { + switch { + case errors.Is(err, context.Canceled): + zap.S().Infof("Interrupted by user, height %d", height) + case errors.Is(err, io.EOF): + zap.S().Infof("End of blockchain file reached, height %d", height) + default: + zap.S().Fatalf("Failed to apply blocks after height %d: %v", height, err) + } +} + +func configureMemProfile(memProfilePath string) error { + if memProfilePath == "" { + return nil + } + f, err := os.Create(filepath.Clean(memProfilePath)) + if err != nil { + return fmt.Errorf("failed to create memory profile: %w", err) + } + defer func() { + if clErr := f.Close(); clErr != nil { + zap.S().Errorf("Failed to close memory profile: %v", clErr) + } + }() + runtime.GC() // get up-to-date statistics + if err = pprof.WriteHeapProfile(f); err != nil { + return fmt.Errorf("failed to write memory profile: %w", err) + } + return nil +} + +func configureBlockchainSettings(blockchainType, cfgPath string) (*settings.BlockchainSettings, error) { + var ss *settings.BlockchainSettings + if strings.ToLower(blockchainType) == "custom" && cfgPath != "" { + f, err := os.Open(filepath.Clean(cfgPath)) if err != nil { - zap.S().Fatal("Could not create memory profile: ", err) + return nil, fmt.Errorf("failed to open custom blockchain settings: %w", err) } - defer func() { _ = f.Close() }() - runtime.GC() // get up-to-date statistics - if err := pprof.WriteHeapProfile(f); err != nil { - zap.S().Fatal("Could not write memory profile: ", err) + defer func() { + if clErr := f.Close(); clErr != nil { + zap.S().Errorf("Failed to close custom blockchain settings: %v", clErr) + } + }() + 
ss, err = settings.ReadBlockchainSettings(f) + if err != nil { + return nil, fmt.Errorf("failed to read custom blockchain settings: %w", err) } + return ss, nil + } + ss, err := settings.BlockchainSettingsByTypeName(blockchainType) + if err != nil { + return nil, fmt.Errorf("failed to load blockchain settings: %w", err) } + return ss, nil +} + +func riseFDLimit() (int, error) { + maxFDs, err := fdlimit.MaxFDs() + if err != nil { + return 0, fmt.Errorf("failed to initialize importer: %w", err) + } + _, err = fdlimit.RaiseMaxFDs(maxFDs) + if err != nil { + return 0, fmt.Errorf("failed to initialize importer: %w", err) + } + return int(maxFDs), nil +} + +func capitalize(str string) string { + runes := []rune(str) + runes[0] = unicode.ToUpper(runes[0]) + return string(runes) } diff --git a/cmd/node/node.go b/cmd/node/node.go index 58a778396..1ab5d4789 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -107,6 +107,7 @@ type config struct { newConnectionsLimit int disableNTP bool microblockInterval time.Duration + enableLightMode bool } func (c *config) logParameters() { @@ -142,6 +143,7 @@ func (c *config) logParameters() { zap.S().Debugf("enable-metamask: %t", c.enableMetaMaskAPI) zap.S().Debugf("disable-ntp: %t", c.disableNTP) zap.S().Debugf("microblock-interval: %s", c.microblockInterval) + zap.S().Debugf("enable-light-mode: %t", c.enableLightMode) } func (c *config) parse() { @@ -233,6 +235,8 @@ func (c *config) parse() { "Disable NTP synchronization. 
Useful when running the node in a docker container.") flag.DurationVar(&c.microblockInterval, "microblock-interval", defaultMicroblockInterval, "Interval between microblocks.") + flag.BoolVar(&c.enableLightMode, "enable-light-mode", false, + "Start node in light mode") flag.Parse() c.logLevel = *l } @@ -381,9 +385,9 @@ func main() { params.Time = ntpTime params.DbParams.BloomFilterParams.Disable = nc.disableBloomFilter - st, err := state.NewState(path, true, params, cfg) + st, err := state.NewState(path, true, params, cfg, nc.enableLightMode) if err != nil { - zap.S().Error("Failed to initialize node's state: %v", err) + zap.S().Errorf("Failed to initialize node's state: %v", err) return } @@ -417,7 +421,7 @@ func main() { return } utx := utxpool.New(uint64(1024*mb), utxValidator, cfg) - parent := peer.NewParent() + parent := peer.NewParent(nc.enableLightMode) nodeNonce, err := rand.Int(rand.Reader, new(big.Int).SetUint64(math.MaxInt32)) if err != nil { @@ -425,7 +429,7 @@ func main() { return } peerSpawnerImpl := peers.NewPeerSpawner(parent, conf.WavesNetwork, declAddr, nc.nodeName, - nodeNonce.Uint64(), proto.ProtocolVersion) + nodeNonce.Uint64(), proto.ProtocolVersion()) peerStorage, err := peersPersistentStorage.NewCBORStorage(nc.statePath, time.Now()) if err != nil { zap.S().Errorf("Failed to open or create peers storage: %v", err) @@ -443,7 +447,7 @@ func main() { peerSpawnerImpl, peerStorage, int(nc.limitAllConnections/2), - proto.ProtocolVersion, + proto.ProtocolVersion(), conf.WavesNetwork, !nc.disableOutgoingConnections, nc.newConnectionsLimit, @@ -480,7 +484,7 @@ func main() { LoggableRunner: logRunner, Time: ntpTime, Wallet: wal, - MicroBlockCache: microblock_cache.NewMicroblockCache(), + MicroBlockCache: microblock_cache.NewMicroBlockCache(), InternalChannel: messages.NewInternalChannel(), MinPeersMining: nc.minPeersMining, SkipMessageList: parent.SkipMessageList, @@ -492,7 +496,7 @@ func main() { ntw, networkInfoCh := network.NewNetwork(svs, parent, 
nc.obsolescencePeriod) go ntw.Run(ctx) - n := node.NewNode(svs, declAddr, bindAddr, nc.microblockInterval) + n := node.NewNode(svs, declAddr, bindAddr, nc.microblockInterval, nc.enableLightMode) go n.Run(ctx, parent, svs.InternalChannel, networkInfoCh, ntw.SyncPeer()) go minerScheduler.Reschedule() diff --git a/cmd/retransmitter/main.go b/cmd/retransmitter/main.go index 07f619586..7ae2cd9a3 100644 --- a/cmd/retransmitter/main.go +++ b/cmd/retransmitter/main.go @@ -131,7 +131,7 @@ func main() { return } - parent := peer.NewParent() + parent := peer.NewParent(false) spawner := retransmit.NewPeerSpawner(skipUselessMessages, parent, wavesNetwork, declAddr) scheme := schemes[wavesNetwork] behaviour := retransmit.NewBehaviour(knownPeers, spawner, scheme) diff --git a/cmd/rollback/main.go b/cmd/rollback/main.go index b637f795b..4116e6335 100644 --- a/cmd/rollback/main.go +++ b/cmd/rollback/main.go @@ -75,7 +75,7 @@ func main() { params.BuildStateHashes = *buildStateHashes params.StoreExtendedApiData = *buildExtendedAPI - s, err := state.NewState(*statePath, true, params, cfg) + s, err := state.NewState(*statePath, true, params, cfg, false) if err != nil { zap.S().Error(err) return diff --git a/cmd/statehash/statehash.go b/cmd/statehash/statehash.go index 9087fda4b..3c98d1188 100644 --- a/cmd/statehash/statehash.go +++ b/cmd/statehash/statehash.go @@ -106,7 +106,7 @@ func run() error { params.BuildStateHashes = true params.ProvideExtendedApi = false - st, err := state.NewState(statePath, false, params, ss) + st, err := state.NewState(statePath, false, params, ss, false) if err != nil { zap.S().Errorf("Failed to open state at '%s': %v", statePath, err) return err diff --git a/itests/net/connection.go b/itests/net/connection.go index 2411ad5f7..f4e19082c 100644 --- a/itests/net/connection.go +++ b/itests/net/connection.go @@ -84,11 +84,11 @@ func NewNodeConnections(p *d.Ports) (NodeConnections, error) { } func establishConnections(p *d.Ports) (NodeConnections, error) { - 
goCon, err := NewConnection(proto.TCPAddr{}, d.Localhost+":"+p.Go.BindPort, proto.ProtocolVersion, "wavesL") + goCon, err := NewConnection(proto.TCPAddr{}, d.Localhost+":"+p.Go.BindPort, proto.ProtocolVersion(), "wavesL") if err != nil { return NodeConnections{}, errors.Wrap(err, "failed to create connection to go node") } - scalaCon, err := NewConnection(proto.TCPAddr{}, d.Localhost+":"+p.Scala.BindPort, proto.ProtocolVersion, "wavesL") + scalaCon, err := NewConnection(proto.TCPAddr{}, d.Localhost+":"+p.Scala.BindPort, proto.ProtocolVersion(), "wavesL") if err != nil { if closeErr := goCon.Close(); closeErr != nil { err = errors.Wrap(err, closeErr.Error()) diff --git a/pkg/grpc/generated/waves/events/events.pb.go b/pkg/grpc/generated/waves/events/events.pb.go index 115cee1ee..466e592c0 100644 --- a/pkg/grpc/generated/waves/events/events.pb.go +++ b/pkg/grpc/generated/waves/events/events.pb.go @@ -1146,12 +1146,11 @@ type StateUpdate_AssetDetails struct { LastUpdated int32 `protobuf:"varint,11,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` SequenceInBlock int32 `protobuf:"varint,12,opt,name=sequence_in_block,json=sequenceInBlock,proto3" json:"sequence_in_block,omitempty"` IssueHeight int32 `protobuf:"varint,13,opt,name=issue_height,json=issueHeight,proto3" json:"issue_height,omitempty"` - // - //Related to a past behavior in the blockchain when it was possible to reissue - //assets so that the total volume became more then int64 max value. - //This field represents accurate volume even for those assets. - //Can be ignored if the target system does not need such accuracy. - //Encoding: like Java BigInteger, https://docs.oracle.com/javase/7/docs/api/java/math/BigInteger.html#toByteArray() + // Related to a past behavior in the blockchain when it was possible to reissue + // assets so that the total volume became more then int64 max value. + // This field represents accurate volume even for those assets. 
+ // Can be ignored if the target system does not need such accuracy. + // Encoding: like Java BigInteger, https://docs.oracle.com/javase/7/docs/api/java/math/BigInteger.html#toByteArray() SafeVolume []byte `protobuf:"bytes,20,opt,name=safe_volume,json=safeVolume,proto3" json:"safe_volume,omitempty"` } @@ -1565,10 +1564,9 @@ type TransactionMetadata_ExchangeMetadata struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // - //Fields starting with `order_*` represent order metadata. - //Each of them is a collection of exactly 2 elements. - //Element indexes correspond to their parent order indexes in the exchange transaction. + // Fields starting with `order_*` represent order metadata. + // Each of them is a collection of exactly 2 elements. + // Element indexes correspond to their parent order indexes in the exchange transaction. OrderIds [][]byte `protobuf:"bytes,1,rep,name=order_ids,json=orderIds,proto3" json:"order_ids,omitempty"` OrderSenderAddresses [][]byte `protobuf:"bytes,2,rep,name=order_sender_addresses,json=orderSenderAddresses,proto3" json:"order_sender_addresses,omitempty"` OrderSenderPublicKeys [][]byte `protobuf:"bytes,3,rep,name=order_sender_public_keys,json=orderSenderPublicKeys,proto3" json:"order_sender_public_keys,omitempty"` diff --git a/pkg/grpc/server/common_test.go b/pkg/grpc/server/common_test.go index ea438b828..effc572ed 100644 --- a/pkg/grpc/server/common_test.go +++ b/pkg/grpc/server/common_test.go @@ -86,7 +86,7 @@ func stateWithCustomGenesis(t *testing.T, genesisPath string) state.State { // Activate data transactions. 
sets.PreactivatedFeatures = []int16{5} params := defaultStateParams() - st, err := state.NewState(dataDir, true, params, sets) + st, err := state.NewState(dataDir, true, params, sets, false) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, st.Close()) @@ -120,7 +120,7 @@ func withAutoCancel(t *testing.T, ctx context.Context) context.Context { func newTestState(t *testing.T, amend bool, params state.StateParams, settings *settings.BlockchainSettings) state.State { dataDir := t.TempDir() - st, err := state.NewState(dataDir, amend, params, settings) + st, err := state.NewState(dataDir, amend, params, settings, false) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, st.Close()) diff --git a/pkg/importer/blocks_importer.go b/pkg/importer/blocks_importer.go new file mode 100644 index 000000000..74c987e02 --- /dev/null +++ b/pkg/importer/blocks_importer.go @@ -0,0 +1,87 @@ +package importer + +import ( + "context" + "fmt" + "time" + + "github.com/wavesplatform/gowaves/pkg/proto" +) + +type BlocksImporter struct { + scheme proto.Scheme + st State + + br *blocksReader + reg *speedRegulator + + h uint64 +} + +func NewBlocksImporter(scheme proto.Scheme, st State, blocksPath string) (*BlocksImporter, error) { + br, err := newBlocksReader(blocksPath) + if err != nil { + return nil, fmt.Errorf("failed to create blocks importer: %w", err) + } + return &BlocksImporter{scheme: scheme, st: st, br: br, reg: newSpeedRegulator()}, nil +} + +func (imp *BlocksImporter) SkipToHeight(ctx context.Context, height proto.Height) error { + imp.h = uint64(1) + if height < imp.h { + return fmt.Errorf("invalid initial height: %d", height) + } + for { + if ctx.Err() != nil { + return ctx.Err() + } + if imp.h == height { + return nil + } + size, err := imp.br.readSize() + if err != nil { + return fmt.Errorf("failed to skip to height %d: %w", height, err) + } + imp.reg.updateTotalSize(size) + imp.br.skip(size) + imp.h++ + } +} + +func (imp *BlocksImporter) Import(ctx 
context.Context, number uint64) error { + var blocks [MaxBlocksBatchSize][]byte + index := uint64(0) + for height := imp.h; height <= number; height++ { + if ctx.Err() != nil { + return ctx.Err() + } + size, err := imp.br.readSize() + if err != nil { + return fmt.Errorf("failed to import: %w", err) + } + imp.reg.updateTotalSize(size) + block, err := imp.br.readBlock(size) + if err != nil { + return fmt.Errorf("failed to import: %w", err) + } + blocks[index] = block + index++ + if imp.reg.incomplete() && (index != MaxBlocksBatchSize) && (height != number) { + continue + } + start := time.Now() + if abErr := imp.st.AddBlocks(blocks[:index]); abErr != nil { + return abErr + } + imp.reg.calculateSpeed(start) + index = 0 + if pErr := maybePersistTxs(imp.st); pErr != nil { + return pErr + } + } + return nil +} + +func (imp *BlocksImporter) Close() error { + return imp.br.close() +} diff --git a/pkg/importer/blocks_reader.go b/pkg/importer/blocks_reader.go new file mode 100644 index 000000000..b2e4c608f --- /dev/null +++ b/pkg/importer/blocks_reader.go @@ -0,0 +1,54 @@ +package importer + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" +) + +type blocksReader struct { + f *os.File + pos int64 +} + +func newBlocksReader(blockchainPath string) (*blocksReader, error) { + f, err := os.Open(filepath.Clean(blockchainPath)) + if err != nil { + return nil, fmt.Errorf("failed to open blocks file: %w", err) + } + return &blocksReader{f: f, pos: 0}, nil +} + +func (br *blocksReader) readSize() (uint32, error) { + buf := make([]byte, uint32Size) + n, err := br.f.ReadAt(buf, br.pos) + if err != nil { + return 0, fmt.Errorf("failed to read block size: %w", err) + } + br.pos += int64(n) + size := binary.BigEndian.Uint32(buf) + if size > MaxBlockSize || size == 0 { + return 0, errors.New("corrupted blockchain file: invalid block size") + } + return size, nil +} + +func (br *blocksReader) skip(size uint32) { + br.pos += int64(size) +} + +func (br *blocksReader) 
readBlock(size uint32) ([]byte, error) { + buf := make([]byte, size) + n, err := br.f.ReadAt(buf, br.pos) + if err != nil { + return nil, fmt.Errorf("failed to read block: %w", err) + } + br.pos += int64(n) + return buf, nil +} + +func (br *blocksReader) close() error { + return br.f.Close() +} diff --git a/pkg/importer/importer.go b/pkg/importer/importer.go index 46c9af74b..e146a6e95 100644 --- a/pkg/importer/importer.go +++ b/pkg/importer/importer.go @@ -2,11 +2,9 @@ package importer import ( "context" - "encoding/binary" "encoding/json" "os" "path/filepath" - "time" "github.com/pkg/errors" "go.uber.org/zap" @@ -20,15 +18,16 @@ const ( initTotalBatchSize = 5 * MiB sizeAdjustment = 1 * MiB + uint32Size = 4 - MaxTotalBatchSize = 20 * MiB - MaxTotalBatchSizeForNetworkSync = 6 * MiB - MaxBlocksBatchSize = 50000 - MaxBlockSize = 2 * MiB + MaxTotalBatchSize = 20 * MiB + MaxBlocksBatchSize = 50000 + MaxBlockSize = 2 * MiB ) type State interface { AddBlocks(blocks [][]byte) error + AddBlocksWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error WavesAddressesNumber() (uint64, error) WavesBalance(account proto.Recipient) (uint64, error) AssetBalance(account proto.Recipient, assetID proto.AssetID) (uint64, error) @@ -36,6 +35,12 @@ type State interface { PersistAddressTransactions() error } +type Importer interface { + SkipToHeight(context.Context, uint64) error + Import(context.Context, uint64) error + Close() error +} + func maybePersistTxs(st State) error { // Check if we need to persist transactions for extended API. 
persistTxs, err := st.ShouldPersistAddressTransactions() @@ -48,105 +53,70 @@ func maybePersistTxs(st State) error { return nil } -func calculateNextMaxSizeAndDirection(maxSize int, speed, prevSpeed float64, increasingSize bool) (int, bool) { - if speed > prevSpeed && increasingSize { - maxSize += sizeAdjustment - if maxSize > MaxTotalBatchSize { - maxSize = MaxTotalBatchSize - } - } else if speed > prevSpeed && !increasingSize { - maxSize -= sizeAdjustment - if maxSize < initTotalBatchSize { - maxSize = initTotalBatchSize - } - } else if speed < prevSpeed && increasingSize { - increasingSize = false - maxSize -= sizeAdjustment - if maxSize < initTotalBatchSize { - maxSize = initTotalBatchSize - } - } else if speed < prevSpeed && !increasingSize { - increasingSize = true - maxSize += sizeAdjustment - if maxSize > MaxTotalBatchSize { - maxSize = MaxTotalBatchSize - } +type ImportParams struct { + Schema proto.Scheme + BlockchainPath, SnapshotsPath string + LightNodeMode bool +} + +func (i ImportParams) validate() error { + if i.Schema == 0 { + return errors.New("scheme/chainID is empty") + } + if i.BlockchainPath == "" { + return errors.New("blockchain path is empty") } - return maxSize, increasingSize + if i.LightNodeMode && i.SnapshotsPath == "" { + return errors.New("snapshots path is empty") + } + return nil } // ApplyFromFile reads blocks from blockchainPath, applying them from height startHeight and until nBlocks+1. // Setting optimize to true speeds up the import, but it is only safe when importing blockchain from scratch // when no rollbacks are possible at all. 
-func ApplyFromFile( //nolint:gocognit // This function is refactored in another PR, see #1290 +func ApplyFromFile( ctx context.Context, - st State, - blockchainPath string, + params ImportParams, + state State, nBlocks, startHeight uint64, ) error { if ctx == nil { ctx = context.Background() } - blockchain, err := os.Open(blockchainPath) // #nosec: in this case check for prevent G304 (CWE-22) is not necessary + imp, err := selectImporter(params, state) if err != nil { - return errors.Errorf("failed to open blockchain file: %v", err) + return errors.Wrap(err, "failed to create importer") } - defer func() { - if err := blockchain.Close(); err != nil { - zap.S().Fatalf("Failed to close blockchain file: %v", err) + if clErr := imp.Close(); clErr != nil { + zap.S().Fatalf("Failed to close importer: %v", clErr) } }() - sb := make([]byte, 4) - var blocks [MaxBlocksBatchSize][]byte - blocksIndex := 0 - readPos := int64(0) - totalSize := 0 - prevSpeed := float64(0) - increasingSize := true - maxSize := initTotalBatchSize - for height := uint64(1); height <= nBlocks; height++ { - if ctx.Err() != nil { - return ctx.Err() - } - if _, err := blockchain.ReadAt(sb, readPos); err != nil { - return err - } - size := binary.BigEndian.Uint32(sb) - if size > MaxBlockSize || size == 0 { - return errors.New("corrupted blockchain file: invalid block size") - } - totalSize += int(size) - readPos += 4 - if height < startHeight { - readPos += int64(size) - continue - } - block := make([]byte, size) - if _, err := blockchain.ReadAt(block, readPos); err != nil { - return err - } - readPos += int64(size) - blocks[blocksIndex] = block - blocksIndex++ - if (totalSize < maxSize) && (blocksIndex != MaxBlocksBatchSize) && (height != nBlocks) { - continue - } - start := time.Now() - if err := st.AddBlocks(blocks[:blocksIndex]); err != nil { - return err - } - elapsed := time.Since(start) - speed := float64(totalSize) / float64(elapsed) - maxSize, increasingSize = 
calculateNextMaxSizeAndDirection(maxSize, speed, prevSpeed, increasingSize) - prevSpeed = speed - totalSize = 0 - blocksIndex = 0 - if err := maybePersistTxs(st); err != nil { - return err + zap.S().Infof("Skipping to height %d", startHeight) + if err = imp.SkipToHeight(ctx, startHeight); err != nil { + return errors.Wrap(err, "failed to skip to state height") + } + zap.S().Infof("Start importing %d blocks", nBlocks) + return imp.Import(ctx, nBlocks) +} + +func selectImporter(params ImportParams, state State) (Importer, error) { + if err := params.validate(); err != nil { // sanity check + return nil, errors.Wrap(err, "invalid import params") + } + if params.LightNodeMode { + imp, err := NewSnapshotsImporter(params.Schema, state, params.BlockchainPath, params.SnapshotsPath) + if err != nil { + return nil, errors.Wrap(err, "failed to create snapshots importer") } + return imp, nil } - return nil + imp, err := NewBlocksImporter(params.Schema, state, params.BlockchainPath) + if err != nil { + return nil, errors.Wrap(err, "failed to create blocks importer") + } + return imp, nil } func CheckBalances(st State, balancesPath string) error { diff --git a/pkg/importer/snapshots_importer.go b/pkg/importer/snapshots_importer.go new file mode 100644 index 000000000..527cd6448 --- /dev/null +++ b/pkg/importer/snapshots_importer.go @@ -0,0 +1,108 @@ +package importer + +import ( + "context" + "fmt" + "time" + + "github.com/wavesplatform/gowaves/pkg/proto" +) + +type SnapshotsImporter struct { + scheme proto.Scheme + st State + + br *blocksReader + sr *snapshotsReader + reg *speedRegulator + + h uint64 +} + +func NewSnapshotsImporter(scheme proto.Scheme, st State, blocksPath, snapshotsPath string) (*SnapshotsImporter, error) { + br, err := newBlocksReader(blocksPath) + if err != nil { + return nil, fmt.Errorf("failed to create snapshots importer: %w", err) + } + sr, err := newSnapshotsReader(scheme, snapshotsPath) + if err != nil { + return nil, fmt.Errorf("failed to create 
snapshots importer: %w", err) + } + return &SnapshotsImporter{scheme: scheme, st: st, br: br, sr: sr, reg: newSpeedRegulator()}, nil +} + +func (imp *SnapshotsImporter) SkipToHeight(ctx context.Context, height proto.Height) error { + imp.h = uint64(1) + if height < imp.h { + return fmt.Errorf("invalid initial height: %d", height) + } + for { + if ctx.Err() != nil { + return ctx.Err() + } + if imp.h == height { + return nil + } + size, err := imp.br.readSize() + if err != nil { + return fmt.Errorf("failed to skip to height %d: %w", height, err) + } + imp.reg.updateTotalSize(size) + imp.br.skip(size) + size, err = imp.sr.readSize() + if err != nil { + return fmt.Errorf("failed to skip to height %d: %w", height, err) + } + imp.sr.skip(size) + imp.h++ + } +} + +func (imp *SnapshotsImporter) Import(ctx context.Context, number uint64) error { + var blocks [MaxBlocksBatchSize][]byte + var snapshots [MaxBlocksBatchSize]*proto.BlockSnapshot + index := 0 + for count := imp.h; count <= number; count++ { + if ctx.Err() != nil { + return ctx.Err() + } + // reading snapshots + snapshot, err := imp.sr.readSnapshot() + if err != nil { + return err + } + snapshots[index] = snapshot + + size, sErr := imp.br.readSize() + if sErr != nil { + return sErr + } + imp.reg.updateTotalSize(size) + block, rErr := imp.br.readBlock(size) + if rErr != nil { + return rErr + } + blocks[index] = block + index++ + if imp.reg.incomplete() && (index != MaxBlocksBatchSize) && (count != number) { + continue + } + start := time.Now() + if abErr := imp.st.AddBlocksWithSnapshots(blocks[:index], snapshots[:index]); abErr != nil { + return abErr + } + imp.reg.calculateSpeed(start) + index = 0 + if pErr := maybePersistTxs(imp.st); pErr != nil { + return pErr + } + } + return nil +} + +func (imp *SnapshotsImporter) Close() error { + if err := imp.sr.close(); err != nil { + return err + } + return imp.br.close() +} diff --git a/pkg/importer/snapshots_reader.go b/pkg/importer/snapshots_reader.go new file mode 
100644 index 000000000..e3a71a649 --- /dev/null +++ b/pkg/importer/snapshots_reader.go @@ -0,0 +1,65 @@ +package importer + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/wavesplatform/gowaves/pkg/proto" +) + +type snapshotsReader struct { + scheme proto.Scheme + f *os.File + pos int64 +} + +func newSnapshotsReader(scheme proto.Scheme, snapshotsPath string) (*snapshotsReader, error) { + f, err := os.Open(filepath.Clean(snapshotsPath)) + if err != nil { + return nil, fmt.Errorf("failed to open snapshots file: %w", err) + } + return &snapshotsReader{scheme: scheme, f: f, pos: 0}, nil +} + +func (sr *snapshotsReader) readSize() (uint32, error) { + buf := make([]byte, uint32Size) + n, err := sr.f.ReadAt(buf, sr.pos) + if err != nil { + return 0, fmt.Errorf("failed to read block snapshot size: %w", err) + } + sr.pos += int64(n) + size := binary.BigEndian.Uint32(buf) + if size == 0 { + return 0, errors.New("corrupted snapshots file: invalid snapshot size") + } + return size, nil +} + +func (sr *snapshotsReader) skip(size uint32) { + sr.pos += int64(size) +} + +func (sr *snapshotsReader) readSnapshot() (*proto.BlockSnapshot, error) { + size, sErr := sr.readSize() + if sErr != nil { + return nil, sErr + } + buf := make([]byte, size) + n, rErr := sr.f.ReadAt(buf, sr.pos) + if rErr != nil { + return nil, fmt.Errorf("failed to read snapshot: %w", rErr) + } + sr.pos += int64(n) + snapshot := &proto.BlockSnapshot{} + if err := snapshot.UnmarshalBinaryImport(buf, sr.scheme); err != nil { + return nil, fmt.Errorf("failed to unmarshal snapshot: %w", err) + } + return snapshot, nil +} + +func (sr *snapshotsReader) close() error { + return sr.f.Close() +} diff --git a/pkg/importer/speed_regulator.go b/pkg/importer/speed_regulator.go new file mode 100644 index 000000000..4e3462397 --- /dev/null +++ b/pkg/importer/speed_regulator.go @@ -0,0 +1,53 @@ +package importer + +import "time" + +type speedRegulator struct { + prevSpeed float64 + speed 
float64 + maxSize int + increasing bool + totalSize int +} + +func newSpeedRegulator() *speedRegulator { + return &speedRegulator{maxSize: initTotalBatchSize, increasing: true} +} + +func (r *speedRegulator) updateTotalSize(size uint32) { + r.totalSize += int(size) +} + +func (r *speedRegulator) incomplete() bool { + return r.totalSize < r.maxSize +} + +func (r *speedRegulator) calculateSpeed(start time.Time) { + elapsed := time.Since(start) + r.speed = float64(r.totalSize) / float64(elapsed) + r.maxSize, r.increasing = r.calculateNextMaxSizeAndDirection() + r.prevSpeed = r.speed + r.totalSize = 0 +} + +func (r *speedRegulator) calculateNextMaxSizeAndDirection() (int, bool) { + maxSize := r.maxSize + increasing := r.increasing + switch { + case r.speed > r.prevSpeed && r.increasing: + maxSize += sizeAdjustment + maxSize = min(maxSize, MaxTotalBatchSize) + case r.speed > r.prevSpeed && !r.increasing: + maxSize -= sizeAdjustment + maxSize = max(maxSize, initTotalBatchSize) + case r.speed < r.prevSpeed && r.increasing: + increasing = false + maxSize -= sizeAdjustment + maxSize = max(maxSize, initTotalBatchSize) + case r.speed < r.prevSpeed && !r.increasing: + increasing = true + maxSize += sizeAdjustment + maxSize = min(maxSize, MaxTotalBatchSize) + } + return maxSize, increasing +} diff --git a/pkg/libs/microblock_cache/microblock_cache.go b/pkg/libs/microblock_cache/microblock_cache.go index f83f0aca0..10e58cac3 100644 --- a/pkg/libs/microblock_cache/microblock_cache.go +++ b/pkg/libs/microblock_cache/microblock_cache.go @@ -5,26 +5,62 @@ import ( "github.com/wavesplatform/gowaves/pkg/util/fifo_cache" ) -type MicroblockCache struct { +const microBlockCacheSize = 24 + +type microBlockWithSnapshot struct { + microBlock *proto.MicroBlock // always not nil + snapshot *proto.BlockSnapshot // can be nil +} + +type MicroBlockCache struct { cache *fifo_cache.FIFOCache } -func NewMicroblockCache() *MicroblockCache { - return &MicroblockCache{ - cache: fifo_cache.New(24), 
+func NewMicroBlockCache() *MicroBlockCache { + return &MicroBlockCache{ + cache: fifo_cache.New(microBlockCacheSize), } } -func (a *MicroblockCache) Add(blockID proto.BlockID, micro *proto.MicroBlock) { - a.cache.Add2(blockID.Bytes(), micro) +func (a *MicroBlockCache) AddMicroBlock( + blockID proto.BlockID, + micro *proto.MicroBlock, +) { + a.cache.Add2(blockID.Bytes(), µBlockWithSnapshot{ + microBlock: micro, + snapshot: nil, // intentionally nil + }) +} + +func (a *MicroBlockCache) AddMicroBlockWithSnapshot( + blockID proto.BlockID, + micro *proto.MicroBlock, + snapshot *proto.BlockSnapshot, +) { + a.cache.Add2(blockID.Bytes(), µBlockWithSnapshot{ + microBlock: micro, + snapshot: snapshot, + }) +} + +func (a *MicroBlockCache) GetBlock(sig proto.BlockID) (*proto.MicroBlock, bool) { + rs, ok := a.cache.Get(sig.Bytes()) + if !ok { + return nil, false + } + return rs.(*microBlockWithSnapshot).microBlock, true } -func (a *MicroblockCache) Get(sig proto.BlockID) (*proto.MicroBlock, bool) { +func (a *MicroBlockCache) GetSnapshot(sig proto.BlockID) (*proto.BlockSnapshot, bool) { rs, ok := a.cache.Get(sig.Bytes()) if !ok { return nil, false } - return rs.(*proto.MicroBlock), true + var ( + snapshot = rs.(*microBlockWithSnapshot).snapshot + existInCache = snapshot != nil + ) + return snapshot, existInCache } type MicroblockInvCache struct { diff --git a/pkg/libs/ordered_blocks/optimistic_load.go b/pkg/libs/ordered_blocks/optimistic_load.go index 5de85e720..f00712358 100644 --- a/pkg/libs/ordered_blocks/optimistic_load.go +++ b/pkg/libs/ordered_blocks/optimistic_load.go @@ -7,12 +7,14 @@ import ( type OrderedBlocks struct { requested []proto.BlockID blocks map[proto.BlockID]*proto.Block + snapshots map[proto.BlockID]*proto.BlockSnapshot } func NewOrderedBlocks() *OrderedBlocks { return &OrderedBlocks{ requested: nil, blocks: make(map[proto.BlockID]*proto.Block), + snapshots: make(map[proto.BlockID]*proto.BlockSnapshot), } } @@ -25,28 +27,40 @@ func (a *OrderedBlocks) 
SetBlock(b *proto.Block) { a.blocks[b.BlockID()] = b } -func (a *OrderedBlocks) pop() (proto.BlockID, *proto.Block, bool) { +func (a *OrderedBlocks) SetSnapshot(blockID proto.BlockID, snapshot *proto.BlockSnapshot) { + a.snapshots[blockID] = snapshot +} + +func (a *OrderedBlocks) pop(isLightNode bool) (proto.BlockID, *proto.Block, *proto.BlockSnapshot, bool) { if len(a.requested) == 0 { - return proto.BlockID{}, nil, false + return proto.BlockID{}, nil, nil, false } firstSig := a.requested[0] bts := a.blocks[firstSig] + bsn := a.snapshots[firstSig] if bts != nil { delete(a.blocks, firstSig) + if isLightNode && bsn != nil { + delete(a.snapshots, firstSig) + a.requested = a.requested[1:] + return firstSig, bts, bsn, true + } a.requested = a.requested[1:] - return firstSig, bts, true + return firstSig, bts, nil, true } - return proto.BlockID{}, nil, false + return proto.BlockID{}, nil, nil, false } -func (a *OrderedBlocks) PopAll() []*proto.Block { - var out []*proto.Block +func (a *OrderedBlocks) PopAll(isLightNode bool) ([]*proto.Block, []*proto.BlockSnapshot) { + var outBlocks []*proto.Block + var outSnapshots []*proto.BlockSnapshot for { - _, b, ok := a.pop() + _, b, s, ok := a.pop(isLightNode) if !ok { - return out + return outBlocks, outSnapshots } - out = append(out, b) + outBlocks = append(outBlocks, b) + outSnapshots = append(outSnapshots, s) } } @@ -58,6 +72,7 @@ func (a *OrderedBlocks) Add(sig proto.BlockID) bool { } a.requested = append(a.requested, sig) a.blocks[sig] = nil + a.snapshots[sig] = nil return true } @@ -66,9 +81,12 @@ func (a *OrderedBlocks) RequestedCount() int { } // blocks count available for pop -func (a *OrderedBlocks) ReceivedCount() int { +func (a *OrderedBlocks) ReceivedCount(isLightNode bool) int { for i, sig := range a.requested { - if a.blocks[sig] == nil { + blockIsNil := a.blocks[sig] == nil + if isLightNode && (blockIsNil || a.snapshots[sig] == nil) { + return i + } else if !isLightNode && blockIsNil { return i } } diff --git 
a/pkg/libs/ordered_blocks/optimistic_load_test.go b/pkg/libs/ordered_blocks/optimistic_load_test.go index d44907d94..f3c70a72c 100644 --- a/pkg/libs/ordered_blocks/optimistic_load_test.go +++ b/pkg/libs/ordered_blocks/optimistic_load_test.go @@ -23,34 +23,38 @@ func makeBlock(sig crypto.Signature) *proto.Block { func TestOrderedBlocks(t *testing.T) { o := ordered_blocks.NewOrderedBlocks() o.Add(proto.NewBlockIDFromSignature(sig1)) - require.Len(t, o.PopAll(), 0) + b, _ := o.PopAll(false) + require.Len(t, b, 0) + b, _ = o.PopAll(false) o.Add(proto.NewBlockIDFromSignature(sig2)) - require.Len(t, o.PopAll(), 0) + require.Len(t, b, 0) // second block arrived first, no sequence right now o.SetBlock(makeBlock(sig2)) - require.Len(t, o.PopAll(), 0) + b, _ = o.PopAll(false) + require.Len(t, b, 0) //require.Equal(t, 0, o.ReceivedCount()) // finally arrived first block, so seq contains 2 blocks o.SetBlock(makeBlock(sig1)) //require.Equal(t, 2, o.ReceivedCount()) - require.Len(t, o.PopAll(), 2) + b, _ = o.PopAll(false) + require.Len(t, b, 2) } func TestOrderedBlocks_AvailableCount(t *testing.T) { o := ordered_blocks.NewOrderedBlocks() o.Add(proto.NewBlockIDFromSignature(sig1)) o.Add(proto.NewBlockIDFromSignature(sig2)) - require.Equal(t, 0, o.ReceivedCount()) + require.Equal(t, 0, o.ReceivedCount(false)) o.SetBlock(makeBlock(sig1)) - require.Equal(t, 1, o.ReceivedCount()) + require.Equal(t, 1, o.ReceivedCount(false)) o.SetBlock(makeBlock(sig2)) - require.Equal(t, 2, o.ReceivedCount()) + require.Equal(t, 2, o.ReceivedCount(false)) - o.PopAll() - require.Equal(t, 0, o.ReceivedCount()) + o.PopAll(false) + require.Equal(t, 0, o.ReceivedCount(false)) } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index e2393ba66..e89fdf866 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -11,9 +11,10 @@ import ( influx "github.com/influxdata/influxdb1-client/v2" "github.com/pkg/errors" + "go.uber.org/zap" + "github.com/wavesplatform/gowaves/pkg/crypto" 
"github.com/wavesplatform/gowaves/pkg/proto" - "go.uber.org/zap" ) const ( @@ -475,7 +476,7 @@ func (r *reporter) run(ctx context.Context) { rep = nil err := r.c.Close() if err != nil { - zap.S().Warn("Failed to close connection to InfluxDB: %v", err) + zap.S().Warnf("Failed to close connection to InfluxDB: %v", err) } return case <-ticker.C: @@ -537,7 +538,7 @@ func parseURL(s string) (influx.HTTPConfig, string, error) { func reportBlock(t tags, f fields) { p, err := influx.NewPoint("block", t, f, time.Now()) if err != nil { - zap.S().Warn("Failed to create metrics point 'block': %v", err) + zap.S().Warnf("Failed to create metrics point 'block': %v", err) return } rep.in <- p @@ -546,7 +547,7 @@ func reportBlock(t tags, f fields) { func reportFSM(t tags, f fields) { p, err := influx.NewPoint("fsm", t, f, time.Now()) if err != nil { - zap.S().Warn("Failed to create metrics point 'fsm': %v", err) + zap.S().Warnf("Failed to create metrics point 'fsm': %v", err) return } rep.in <- p diff --git a/pkg/mock/state.go b/pkg/mock/state.go index bc10bff04..b8bda70bc 100644 --- a/pkg/mock/state.go +++ b/pkg/mock/state.go @@ -1100,6 +1100,20 @@ func (mr *MockStateModifierMockRecorder) AddBlocks(blocks interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlocks", reflect.TypeOf((*MockStateModifier)(nil).AddBlocks), blocks) } +// AddBlocksWithSnapshots mocks base method. +func (m *MockStateModifier) AddBlocksWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddBlocksWithSnapshots", blocks, snapshots) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddBlocksWithSnapshots indicates an expected call of AddBlocksWithSnapshots. 
+func (mr *MockStateModifierMockRecorder) AddBlocksWithSnapshots(blocks, snapshots interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlocksWithSnapshots", reflect.TypeOf((*MockStateModifier)(nil).AddBlocksWithSnapshots), blocks, snapshots) +} + // AddDeserializedBlock mocks base method. func (m *MockStateModifier) AddDeserializedBlock(block *proto.Block) (*proto.Block, error) { m.ctrl.T.Helper() @@ -1130,6 +1144,21 @@ func (mr *MockStateModifierMockRecorder) AddDeserializedBlocks(blocks interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeserializedBlocks", reflect.TypeOf((*MockStateModifier)(nil).AddDeserializedBlocks), blocks) } +// AddDeserializedBlocksWithSnapshots mocks base method. +func (m *MockStateModifier) AddDeserializedBlocksWithSnapshots(blocks []*proto.Block, snapshots []*proto.BlockSnapshot) (*proto.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDeserializedBlocksWithSnapshots", blocks, snapshots) + ret0, _ := ret[0].(*proto.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDeserializedBlocksWithSnapshots indicates an expected call of AddDeserializedBlocksWithSnapshots. +func (mr *MockStateModifierMockRecorder) AddDeserializedBlocksWithSnapshots(blocks, snapshots interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeserializedBlocksWithSnapshots", reflect.TypeOf((*MockStateModifier)(nil).AddDeserializedBlocksWithSnapshots), blocks, snapshots) +} + // Close mocks base method. func (m *MockStateModifier) Close() error { m.ctrl.T.Helper() @@ -1358,6 +1387,20 @@ func (mr *MockStateMockRecorder) AddBlocks(blocks interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlocks", reflect.TypeOf((*MockState)(nil).AddBlocks), blocks) } +// AddBlocksWithSnapshots mocks base method. 
+func (m *MockState) AddBlocksWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddBlocksWithSnapshots", blocks, snapshots) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddBlocksWithSnapshots indicates an expected call of AddBlocksWithSnapshots. +func (mr *MockStateMockRecorder) AddBlocksWithSnapshots(blocks, snapshots interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlocksWithSnapshots", reflect.TypeOf((*MockState)(nil).AddBlocksWithSnapshots), blocks, snapshots) +} + // AddDeserializedBlock mocks base method. func (m *MockState) AddDeserializedBlock(block *proto.Block) (*proto.Block, error) { m.ctrl.T.Helper() @@ -1388,6 +1431,21 @@ func (mr *MockStateMockRecorder) AddDeserializedBlocks(blocks interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeserializedBlocks", reflect.TypeOf((*MockState)(nil).AddDeserializedBlocks), blocks) } +// AddDeserializedBlocksWithSnapshots mocks base method. +func (m *MockState) AddDeserializedBlocksWithSnapshots(blocks []*proto.Block, snapshots []*proto.BlockSnapshot) (*proto.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddDeserializedBlocksWithSnapshots", blocks, snapshots) + ret0, _ := ret[0].(*proto.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddDeserializedBlocksWithSnapshots indicates an expected call of AddDeserializedBlocksWithSnapshots. +func (mr *MockStateMockRecorder) AddDeserializedBlocksWithSnapshots(blocks, snapshots interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddDeserializedBlocksWithSnapshots", reflect.TypeOf((*MockState)(nil).AddDeserializedBlocksWithSnapshots), blocks, snapshots) +} + // AddrByAlias mocks base method. 
func (m *MockState) AddrByAlias(alias proto.Alias) (proto.WavesAddress, error) { m.ctrl.T.Helper() diff --git a/pkg/node/actions_by_type.go b/pkg/node/actions_by_type.go index b447c9fe9..1a77fe5df 100644 --- a/pkg/node/actions_by_type.go +++ b/pkg/node/actions_by_type.go @@ -4,13 +4,13 @@ import ( "math/big" "reflect" - "github.com/wavesplatform/gowaves/pkg/logging" - "github.com/wavesplatform/gowaves/pkg/node/peers/storage" - "go.uber.org/zap" "github.com/wavesplatform/gowaves/pkg/crypto" + g "github.com/wavesplatform/gowaves/pkg/grpc/generated/waves" + "github.com/wavesplatform/gowaves/pkg/logging" "github.com/wavesplatform/gowaves/pkg/node/fsm" + "github.com/wavesplatform/gowaves/pkg/node/peers/storage" "github.com/wavesplatform/gowaves/pkg/p2p/peer" "github.com/wavesplatform/gowaves/pkg/p2p/peer/extension" "github.com/wavesplatform/gowaves/pkg/proto" @@ -179,7 +179,7 @@ func MicroBlockRequestAction( if err != nil { return nil, err } - micro, ok := services.MicroBlockCache.Get(blockID) + micro, ok := services.MicroBlockCache.GetBlock(blockID) if ok { _ = extension.NewPeerExtension(mess.ID, services.Scheme).SendMicroBlock(micro) } @@ -255,23 +255,115 @@ func PBTransactionAction(_ services.Services, mess peer.ProtoMessage, fsm *fsm.F return fsm.Transaction(mess.ID, t) } +func MicroSnapshotRequestAction(services services.Services, mess peer.ProtoMessage, _ *fsm.FSM) (fsm.Async, error) { + blockIDBytes := mess.Message.(*proto.MicroBlockSnapshotRequestMessage).BlockIDBytes + blockID, err := proto.NewBlockIDFromBytes(blockIDBytes) + if err != nil { + return nil, err + } + sn, ok := services.MicroBlockCache.GetSnapshot(blockID) + if ok { + snapshotProto, errToProto := sn.ToProtobuf() + if errToProto != nil { + return nil, errToProto + } + sProto := g.MicroBlockSnapshot{ + Snapshots: snapshotProto, + TotalBlockId: blockIDBytes, + } + bsmBytes, errMarshall := sProto.MarshalVTStrict() + if errMarshall != nil { + return nil, errMarshall + } + bs := 
proto.MicroBlockSnapshotMessage{Bytes: bsmBytes} + mess.ID.SendMessage(&bs) + } + return nil, nil +} + +func GetSnapshotAction(services services.Services, mess peer.ProtoMessage, _ *fsm.FSM) (fsm.Async, error) { + blockID := mess.Message.(*proto.GetBlockSnapshotMessage).BlockID + h, err := services.State.BlockIDToHeight(blockID) + if err != nil { + return nil, err + } + snapshot, err := services.State.SnapshotsAtHeight(h) + if err != nil { + return nil, err + } + snapshotProto, err := snapshot.ToProtobuf() + if err != nil { + return nil, err + } + sProto := g.BlockSnapshot{ + Snapshots: snapshotProto, + BlockId: blockID.Bytes(), + } + bsmBytes, err := sProto.MarshalVTStrict() + if err != nil { + return nil, err + } + bs := proto.BlockSnapshotMessage{Bytes: bsmBytes} + mess.ID.SendMessage(&bs) + return nil, nil +} + +func BlockSnapshotAction(services services.Services, mess peer.ProtoMessage, fsm *fsm.FSM) (fsm.Async, error) { + protoMess := g.BlockSnapshot{} + if err := protoMess.UnmarshalVT(mess.Message.(*proto.BlockSnapshotMessage).Bytes); err != nil { + zap.S().Named(logging.NetworkNamespace).Debugf("Failed to deserialize block snapshot: %v", err) + return nil, err + } + blockID, err := proto.NewBlockIDFromBytes(protoMess.BlockId) + if err != nil { + return nil, err + } + blockSnapshot, err := proto.BlockSnapshotFromProtobuf(services.Scheme, protoMess.Snapshots) + if err != nil { + return nil, err + } + zap.S().Named(logging.NetworkNamespace).Debugf("Snapshot for block '%s' received", blockID.String()) + return fsm.BlockSnapshot(mess.ID, blockID, blockSnapshot) +} + +func MicroBlockSnapshotAction(services services.Services, mess peer.ProtoMessage, fsm *fsm.FSM) (fsm.Async, error) { + protoMess := g.MicroBlockSnapshot{} + if err := protoMess.UnmarshalVT(mess.Message.(*proto.MicroBlockSnapshotMessage).Bytes); err != nil { + zap.S().Named(logging.NetworkNamespace).Debugf("Failed to deserialize micro block snapshot: %v", err) + return nil, err + } + blockID, err := 
proto.NewBlockIDFromBytes(protoMess.TotalBlockId) + if err != nil { + return nil, err + } + blockSnapshot, err := proto.BlockSnapshotFromProtobuf(services.Scheme, protoMess.Snapshots) + if err != nil { + return nil, err + } + return fsm.MicroBlockSnapshot(mess.ID, blockID, blockSnapshot) +} + func createActions() map[reflect.Type]Action { return map[reflect.Type]Action{ - reflect.TypeOf(&proto.ScoreMessage{}): ScoreAction, - reflect.TypeOf(&proto.GetPeersMessage{}): GetPeersAction, - reflect.TypeOf(&proto.PeersMessage{}): PeersAction, - reflect.TypeOf(&proto.BlockMessage{}): BlockAction, - reflect.TypeOf(&proto.GetBlockMessage{}): GetBlockAction, - reflect.TypeOf(&proto.SignaturesMessage{}): SignaturesAction, - reflect.TypeOf(&proto.GetSignaturesMessage{}): GetSignaturesAction, - reflect.TypeOf(&proto.MicroBlockInvMessage{}): MicroBlockInvAction, - reflect.TypeOf(&proto.MicroBlockRequestMessage{}): MicroBlockRequestAction, - reflect.TypeOf(&proto.MicroBlockMessage{}): MicroBlockAction, - reflect.TypeOf(&proto.PBBlockMessage{}): PBBlockAction, - reflect.TypeOf(&proto.PBMicroBlockMessage{}): PBMicroBlockAction, - reflect.TypeOf(&proto.GetBlockIdsMessage{}): GetBlockIdsAction, - reflect.TypeOf(&proto.BlockIdsMessage{}): BlockIdsAction, - reflect.TypeOf(&proto.TransactionMessage{}): TransactionAction, - reflect.TypeOf(&proto.PBTransactionMessage{}): PBTransactionAction, + reflect.TypeOf(&proto.ScoreMessage{}): ScoreAction, + reflect.TypeOf(&proto.GetPeersMessage{}): GetPeersAction, + reflect.TypeOf(&proto.PeersMessage{}): PeersAction, + reflect.TypeOf(&proto.BlockMessage{}): BlockAction, + reflect.TypeOf(&proto.GetBlockMessage{}): GetBlockAction, + reflect.TypeOf(&proto.SignaturesMessage{}): SignaturesAction, + reflect.TypeOf(&proto.GetSignaturesMessage{}): GetSignaturesAction, + reflect.TypeOf(&proto.MicroBlockInvMessage{}): MicroBlockInvAction, + reflect.TypeOf(&proto.MicroBlockRequestMessage{}): MicroBlockRequestAction, + reflect.TypeOf(&proto.MicroBlockMessage{}): 
MicroBlockAction, + reflect.TypeOf(&proto.PBBlockMessage{}): PBBlockAction, + reflect.TypeOf(&proto.PBMicroBlockMessage{}): PBMicroBlockAction, + reflect.TypeOf(&proto.GetBlockIdsMessage{}): GetBlockIdsAction, + reflect.TypeOf(&proto.BlockIdsMessage{}): BlockIdsAction, + reflect.TypeOf(&proto.TransactionMessage{}): TransactionAction, + reflect.TypeOf(&proto.PBTransactionMessage{}): PBTransactionAction, + reflect.TypeOf(&proto.GetBlockSnapshotMessage{}): GetSnapshotAction, + reflect.TypeOf(&proto.MicroBlockSnapshotRequestMessage{}): MicroSnapshotRequestAction, + reflect.TypeOf(&proto.BlockSnapshotMessage{}): BlockSnapshotAction, + reflect.TypeOf(&proto.MicroBlockSnapshotMessage{}): MicroBlockSnapshotAction, } } diff --git a/pkg/node/blocks_applier/blocks_applier.go b/pkg/node/blocks_applier/blocks_applier.go index 7191bfe23..38d4853f4 100644 --- a/pkg/node/blocks_applier/blocks_applier.go +++ b/pkg/node/blocks_applier/blocks_applier.go @@ -1,6 +1,7 @@ package blocks_applier import ( + stderrors "errors" "math/big" "github.com/pkg/errors" @@ -8,6 +9,8 @@ import ( "github.com/wavesplatform/gowaves/pkg/state" ) +const maxRollbackDeltaHeight = 100 + type innerBlocksApplier struct { } @@ -17,8 +20,10 @@ type innerState interface { ScoreAtHeight(height proto.Height) (*big.Int, error) BlockIDToHeight(blockID proto.BlockID) (proto.Height, error) AddDeserializedBlocks(blocks []*proto.Block) (*proto.Block, error) + AddDeserializedBlocksWithSnapshots(blocks []*proto.Block, snapshots []*proto.BlockSnapshot) (*proto.Block, error) BlockByHeight(height proto.Height) (*proto.Block, error) RollbackToHeight(height proto.Height) error + SnapshotsAtHeight(height proto.Height) (proto.BlockSnapshot, error) } func (a *innerBlocksApplier) exists(storage innerState, block *proto.Block) (bool, error) { @@ -32,55 +37,125 @@ func (a *innerBlocksApplier) exists(storage innerState, block *proto.Block) (boo return false, err } -func (a *innerBlocksApplier) apply(storage innerState, blocks 
[]*proto.Block) (proto.Height, error) { +func (a *innerBlocksApplier) apply( + storage innerState, + blocks []*proto.Block, +) (proto.Height, error) { if len(blocks) == 0 { return 0, errors.New("empty blocks") } + currentHeight, parentHeight, err := a.getParentAndCurrentHeight(storage, blocks) + if err != nil { + return 0, err + } + + // so, new blocks has higher score, try to apply it. + // Do we need rollback? + if parentHeight == currentHeight { + // no, don't rollback, just add blocks + _, err = storage.AddDeserializedBlocks(blocks) + if err != nil { + return 0, err + } + return currentHeight + proto.Height(len(blocks)), nil + } + + deltaHeight := currentHeight - parentHeight + if deltaHeight > maxRollbackDeltaHeight { // max number that we can rollback + return 0, errors.Errorf( + "can't apply new blocks, rollback more than %d blocks, %d", maxRollbackDeltaHeight, deltaHeight) + } + + // save previously added blocks. If new firstBlock failed to add, then return them back + rollbackBlocks, err := a.getRollbackBlocks(storage, deltaHeight, parentHeight) + if err != nil { + return 0, err + } + + err = storage.RollbackToHeight(parentHeight) + if err != nil { + return 0, errors.Wrapf(err, "failed to rollback to height %d", parentHeight) + } + // applying new blocks + _, err = storage.AddDeserializedBlocks(blocks) + if err != nil { + // return back saved blocks + _, err2 := storage.AddDeserializedBlocks(rollbackBlocks) + if err2 != nil { + return 0, errors.Wrap(err2, "failed rollback deserialized blocks") + } + + return 0, errors.Wrapf(stderrors.Join(err, err2), + "failed add deserialized blocks, first block id %s", blocks[0].BlockID().String()) + } + return parentHeight + proto.Height(len(blocks)), nil +} + +func (a *innerBlocksApplier) getParentAndCurrentHeight( + storage innerState, + blocks []*proto.Block, +) (proto.Height, proto.Height, error) { firstBlock := blocks[0] // check first block if exists _, err := storage.Block(firstBlock.BlockID()) if err == nil { - 
return 0, proto.NewInfoMsg(errors.Errorf("first block %s exists", firstBlock.BlockID().String())) + return 0, 0, proto.NewInfoMsg(errors.Errorf("first block %s exists", firstBlock.BlockID().String())) } if !state.IsNotFound(err) { - return 0, errors.Wrap(err, "unknown error") + return 0, 0, errors.Wrap(err, "unknown error") } currentHeight, err := storage.Height() if err != nil { - return 0, err + return 0, 0, err } // current score. Main idea is to find parent block, and check if score // of all passed blocks higher than currentScore. If yes, we can add blocks currentScore, err := storage.ScoreAtHeight(currentHeight) if err != nil { - return 0, err + return 0, 0, err } // try to find parent. If not - we can't add blocks, skip it parentHeight, err := storage.BlockIDToHeight(firstBlock.Parent) if err != nil { - return 0, proto.NewInfoMsg(errors.Wrapf(err, "failed get parent height, firstBlock id %s, for firstBlock %s", + return 0, 0, proto.NewInfoMsg(errors.Wrapf(err, "failed get parent height, firstBlock id %s, for firstBlock %s", firstBlock.Parent.String(), firstBlock.BlockID().String())) } // calculate score of all passed blocks forkScore, err := calcMultipleScore(blocks) if err != nil { - return 0, errors.Wrap(err, "failed calculate score of passed blocks") + return 0, 0, errors.Wrap(err, "failed calculate score of passed blocks") } parentScore, err := storage.ScoreAtHeight(parentHeight) if err != nil { - return 0, errors.Wrapf(err, "failed get score at %d", parentHeight) + return 0, 0, errors.Wrapf(err, "failed get score at %d", parentHeight) } cumulativeScore := forkScore.Add(forkScore, parentScore) if currentScore.Cmp(cumulativeScore) >= 0 { // current score is higher or the same as fork score - do not apply blocks - return 0, proto.NewInfoMsg(errors.Errorf("low fork score: current blockchain score (%s) is higher than or equal to fork's score (%s)", + return 0, 0, proto.NewInfoMsg(errors.Errorf( + "low fork score: current blockchain score (%s) is higher than 
or equal to fork's score (%s)", currentScore.String(), cumulativeScore.String())) } + return currentHeight, parentHeight, nil +} + +func (a *innerBlocksApplier) applyWithSnapshots( + storage innerState, + blocks []*proto.Block, + snapshots []*proto.BlockSnapshot, +) (proto.Height, error) { + if len(blocks) == 0 { + return 0, errors.New("empty blocks") + } + currentHeight, parentHeight, err := a.getParentAndCurrentHeight(storage, blocks) + if err != nil { + return 0, err + } - // so, new blocks has higher score, try apply it. + // so, new blocks has higher score, try to apply it. // Do we need rollback? if parentHeight == currentHeight { // no, don't rollback, just add blocks - _, err := storage.AddDeserializedBlocks(blocks) + _, err = storage.AddDeserializedBlocksWithSnapshots(blocks, snapshots) if err != nil { return 0, err } @@ -88,38 +163,128 @@ func (a *innerBlocksApplier) apply(storage innerState, blocks []*proto.Block) (p } deltaHeight := currentHeight - parentHeight - if deltaHeight > 100 { // max number that we can rollback - return 0, errors.Errorf("can't apply new blocks, rollback more than 100 blocks, %d", deltaHeight) + if deltaHeight > maxRollbackDeltaHeight { // max number that we can rollback + return 0, errors.Errorf( + "can't apply new blocks, rollback more than %d blocks, %d", maxRollbackDeltaHeight, deltaHeight) } // save previously added blocks. 
If new firstBlock failed to add, then return them back + rollbackBlocks, rollbackBlocksSnapshots, err := a.getRollbackBlocksAndSnapshots(storage, deltaHeight, parentHeight) + if err != nil { + return 0, err + } + + err = storage.RollbackToHeight(parentHeight) + if err != nil { + return 0, errors.Wrapf(err, "failed to rollback to height %d", parentHeight) + } + // applying new blocks + _, err = storage.AddDeserializedBlocksWithSnapshots(blocks, snapshots) + if err != nil { + // return back saved blocks + _, errDeserialized := storage.AddDeserializedBlocksWithSnapshots(rollbackBlocks, rollbackBlocksSnapshots) + if errDeserialized != nil { + return 0, errors.Wrap(errDeserialized, "failed rollback deserialized blocks") + } + return 0, errors.Wrapf(stderrors.Join(err, errDeserialized), + "failed add deserialized blocks, first block id %s", blocks[0].BlockID().String()) + } + return parentHeight + proto.Height(len(blocks)), nil +} + +func (a *innerBlocksApplier) getRollbackBlocksAndSnapshots( + storage innerState, + deltaHeight proto.Height, + parentHeight proto.Height, +) ([]*proto.Block, []*proto.BlockSnapshot, error) { + rollbackBlocks := make([]*proto.Block, 0, deltaHeight) + rollbackBlocksSnapshots := make([]*proto.BlockSnapshot, 0, deltaHeight) + for i := proto.Height(1); i <= deltaHeight; i++ { + block, err := storage.BlockByHeight(parentHeight + i) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get firstBlock by height %d", parentHeight+i) + } + rollbackBlocks = append(rollbackBlocks, block) + + snapshot, err := storage.SnapshotsAtHeight(parentHeight + i) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get snapshot by height %d", parentHeight+i) + } + rollbackBlocksSnapshots = append(rollbackBlocksSnapshots, &snapshot) + } + return rollbackBlocks, rollbackBlocksSnapshots, nil +} + +func (a *innerBlocksApplier) getRollbackBlocks( + storage innerState, + deltaHeight proto.Height, + parentHeight proto.Height, +) ([]*proto.Block, 
error) { rollbackBlocks := make([]*proto.Block, 0, deltaHeight) for i := proto.Height(1); i <= deltaHeight; i++ { block, err := storage.BlockByHeight(parentHeight + i) if err != nil { - return 0, errors.Wrapf(err, "failed to get firstBlock by height %d", parentHeight+i) + return nil, errors.Wrapf(err, "failed to get firstBlock by height %d", parentHeight+i) } rollbackBlocks = append(rollbackBlocks, block) } + return rollbackBlocks, nil +} + +func (a *innerBlocksApplier) applyMicro( + storage innerState, + block *proto.Block, +) (proto.Height, error) { + _, err := storage.Block(block.BlockID()) + if err == nil { + return 0, errors.Errorf("block '%s' already exist", block.BlockID().String()) + } + if !state.IsNotFound(err) { + return 0, errors.Wrap(err, "unexpected error") + } + + currentHeight, err := storage.Height() + if err != nil { + return 0, err + } + parentHeight, err := storage.BlockIDToHeight(block.Parent) + if err != nil { + return 0, errors.Wrapf(err, "failed get height of parent block '%s'", block.Parent.String()) + } + + if currentHeight-parentHeight != 1 { + return 0, errors.Errorf("invalid parent height %d", parentHeight) + } + + currentBlock, err := storage.BlockByHeight(currentHeight) + if err != nil { + return 0, errors.Wrapf(err, "failed to get current block by height %d", currentHeight) + } err = storage.RollbackToHeight(parentHeight) if err != nil { return 0, errors.Wrapf(err, "failed to rollback to height %d", parentHeight) } + // applying new blocks - _, err = storage.AddDeserializedBlocks(blocks) + _, err = storage.AddDeserializedBlocks([]*proto.Block{block}) if err != nil { // return back saved blocks - _, err2 := storage.AddDeserializedBlocks(rollbackBlocks) - if err2 != nil { - return 0, errors.Wrap(err2, "failed rollback deserialized blocks") + _, errAdd := storage.AddDeserializedBlocks([]*proto.Block{currentBlock}) + if errAdd != nil { + return 0, errors.Wrap(errAdd, "failed rollback block") } - return 0, errors.Wrapf(err, "failed add 
deserialized blocks, first block id %s", firstBlock.BlockID().String()) + return 0, errors.Wrapf(stderrors.Join(err, errAdd), + "failed apply new block '%s'", block.BlockID().String()) } - return parentHeight + proto.Height(len(blocks)), nil + return currentHeight, nil } -func (a *innerBlocksApplier) applyMicro(storage innerState, block *proto.Block) (proto.Height, error) { +func (a *innerBlocksApplier) applyMicroWithSnapshot( + storage innerState, + block *proto.Block, + snapshot *proto.BlockSnapshot, +) (proto.Height, error) { _, err := storage.Block(block.BlockID()) if err == nil { return 0, errors.Errorf("block '%s' already exist", block.BlockID().String()) @@ -145,6 +310,11 @@ func (a *innerBlocksApplier) applyMicro(storage innerState, block *proto.Block) if err != nil { return 0, errors.Wrapf(err, "failed to get current block by height %d", currentHeight) } + curSnapshot, errSAT := storage.SnapshotsAtHeight(currentHeight) + if errSAT != nil { + return 0, errSAT + } + currentSnapshotsToApply := []*proto.BlockSnapshot{&curSnapshot} err = storage.RollbackToHeight(parentHeight) if err != nil { @@ -152,14 +322,18 @@ func (a *innerBlocksApplier) applyMicro(storage innerState, block *proto.Block) } // applying new blocks - _, err = storage.AddDeserializedBlocks([]*proto.Block{block}) + _, err = storage.AddDeserializedBlocksWithSnapshots([]*proto.Block{block}, []*proto.BlockSnapshot{snapshot}) if err != nil { // return back saved blocks - _, err2 := storage.AddDeserializedBlocks([]*proto.Block{currentBlock}) - if err2 != nil { - return 0, errors.Wrap(err2, "failed rollback block") + _, errAdd := storage.AddDeserializedBlocksWithSnapshots( + []*proto.Block{currentBlock}, + currentSnapshotsToApply, + ) + if errAdd != nil { + return 0, errors.Wrap(errAdd, "failed rollback block") } - return 0, errors.Wrapf(err, "failed apply new block '%s'", block.BlockID().String()) + return 0, errors.Wrapf(stderrors.Join(err, errAdd), + "failed apply new block '%s'", 
block.BlockID().String()) } return currentHeight, nil } @@ -178,14 +352,36 @@ func (a *BlocksApplier) BlockExists(state state.State, block *proto.Block) (bool return a.inner.exists(state, block) } -func (a *BlocksApplier) Apply(state state.State, blocks []*proto.Block) (proto.Height, error) { +func (a *BlocksApplier) Apply( + state state.State, + blocks []*proto.Block, +) (proto.Height, error) { return a.inner.apply(state, blocks) } -func (a *BlocksApplier) ApplyMicro(state state.State, block *proto.Block) (proto.Height, error) { +func (a *BlocksApplier) ApplyMicro( + state state.State, + block *proto.Block, +) (proto.Height, error) { return a.inner.applyMicro(state, block) } +func (a *BlocksApplier) ApplyWithSnapshots( + state state.State, + blocks []*proto.Block, + snapshots []*proto.BlockSnapshot, +) (proto.Height, error) { + return a.inner.applyWithSnapshots(state, blocks, snapshots) +} + +func (a *BlocksApplier) ApplyMicroWithSnapshots( + state state.State, + block *proto.Block, + snapshot *proto.BlockSnapshot, +) (proto.Height, error) { + return a.inner.applyMicroWithSnapshot(state, block, snapshot) +} + func calcMultipleScore(blocks []*proto.Block) (*big.Int, error) { score := big.NewInt(0) for _, block := range blocks { diff --git a/pkg/node/blocks_applier/node_mocks.go b/pkg/node/blocks_applier/node_mocks.go index 2c53bab89..3a3da2d84 100644 --- a/pkg/node/blocks_applier/node_mocks.go +++ b/pkg/node/blocks_applier/node_mocks.go @@ -15,6 +15,7 @@ func notFound() state.StateError { type MockStateManager struct { state []*proto.Block + snapshots []*proto.BlockSnapshot id2Block map[proto.BlockID]*proto.Block Peers_ []proto.TCPAddr blockIDToHeight map[proto.BlockID]proto.Height @@ -117,6 +118,9 @@ func (a *MockStateManager) RollbackToHeight(height uint64) error { block := a.state[len(a.state)-1] a.state = a.state[:len(a.state)-1] delete(a.blockIDToHeight, block.BlockID()) + if len(a.snapshots) != 0 { + a.snapshots = a.snapshots[:len(a.snapshots)-1] + } } return 
nil } @@ -275,7 +279,10 @@ func (a *MockStateManager) AddDeserializedBlock(block *proto.Block) (*proto.Bloc a.blockIDToHeight[block.BlockID()] = proto.Height(len(a.state)) return block, nil } -func (a *MockStateManager) AddDeserializedBlocks(blocks []*proto.Block) (*proto.Block, error) { + +func (a *MockStateManager) AddDeserializedBlocks( + blocks []*proto.Block, +) (*proto.Block, error) { var out *proto.Block var err error for _, b := range blocks { @@ -286,6 +293,24 @@ func (a *MockStateManager) AddDeserializedBlocks(blocks []*proto.Block) (*proto. return out, nil } +func (a *MockStateManager) AddDeserializedBlocksWithSnapshots( + blocks []*proto.Block, + snapshots []*proto.BlockSnapshot, +) (*proto.Block, error) { + var out *proto.Block + var err error + if len(blocks) != len(snapshots) { + panic("the numbers of snapshots doesn't match the number of blocks") + } + for i, b := range blocks { + if out, err = a.AddDeserializedBlock(b); err != nil { + return nil, err + } + a.snapshots = append(a.snapshots, snapshots[i]) + } + return out, nil +} + func (a *MockStateManager) BlockBytes(_ proto.BlockID) ([]byte, error) { panic("implement me") } @@ -337,3 +362,10 @@ func (a *MockStateManager) StartProvidingExtendedApi() error { func (a *MockStateManager) HitSourceAtHeight(_ proto.Height) ([]byte, error) { panic("not implemented") } + +func (a *MockStateManager) SnapshotsAtHeight(h proto.Height) (proto.BlockSnapshot, error) { + if h > proto.Height(len(a.snapshots)) { + return proto.BlockSnapshot{}, notFound() + } + return *a.snapshots[h-1], nil +} diff --git a/pkg/node/fsm/fsm.go b/pkg/node/fsm/fsm.go index c656e1d1f..980a9172b 100644 --- a/pkg/node/fsm/fsm.go +++ b/pkg/node/fsm/fsm.go @@ -27,8 +27,24 @@ type Async []tasks.Task type BlocksApplier interface { BlockExists(state storage.State, block *proto.Block) (bool, error) - Apply(state storage.State, block []*proto.Block) (proto.Height, error) - ApplyMicro(state storage.State, block *proto.Block) (proto.Height, error) 
+ Apply( + state storage.State, + block []*proto.Block, + ) (proto.Height, error) + ApplyMicro( + state storage.State, + block *proto.Block, + ) (proto.Height, error) + ApplyWithSnapshots( + state storage.State, + block []*proto.Block, + snapshots []*proto.BlockSnapshot, + ) (proto.Height, error) + ApplyMicroWithSnapshots( + state storage.State, + block *proto.Block, + snapshots *proto.BlockSnapshot, + ) (proto.Height, error) } type BaseInfo struct { @@ -63,6 +79,8 @@ type BaseInfo struct { skipMessageList *messages.SkipMessageList syncPeer *network.SyncPeer + + enableLightMode bool } func (a *BaseInfo) BroadcastTransaction(t proto.Transaction, receivedFrom peer.Peer) { @@ -79,11 +97,13 @@ func (a *BaseInfo) CleanUtx() { // States. const ( - IdleStateName = "Idle" - NGStateName = "NG" - PersistStateName = "Persist" - SyncStateName = "Sync" - HaltStateName = "Halt" + IdleStateName = "Idle" + NGStateName = "NG" + WaitSnapshotStateName = "WaitSnapshot" + WaitMicroSnapshotStateName = "WaitMicroSnapshot" + PersistStateName = "Persist" + SyncStateName = "Sync" + HaltStateName = "Halt" ) // Events. 
@@ -101,10 +121,12 @@ const ( TransactionEvent = "Transaction" HaltEvent = "Halt" - StopSyncEvent = "StopSync" - StopMiningEvent = "StopMining" - StartMiningEvent = "StartMining" - ChangeSyncPeerEvent = "ChangeSyncPeer" + StopSyncEvent = "StopSync" + StopMiningEvent = "StopMining" + StartMiningEvent = "StartMining" + ChangeSyncPeerEvent = "ChangeSyncPeer" + BlockSnapshotEvent = "BlockSnapshotEvent" + MicroBlockSnapshotEvent = "MicroBlockSnapshotEvent" ) type FSM struct { @@ -127,6 +149,7 @@ func NewFSM( services services.Services, microblockInterval, obsolescence time.Duration, syncPeer *network.SyncPeer, + enableLightMode bool, ) (*FSM, Async, error) { if microblockInterval <= 0 { return nil, nil, errors.New("microblock interval must be positive") @@ -158,6 +181,7 @@ func NewFSM( skipMessageList: services.SkipMessageList, syncPeer: syncPeer, + enableLightMode: enableLightMode, } info.scheduler.Reschedule() @@ -187,6 +211,8 @@ func NewFSM( initNGStateInFSM(state, fsm, info) initPersistStateInFSM(state, fsm, info) initSyncStateInFSM(state, fsm, info) + initWaitMicroSnapshotStateInFSM(state, fsm, info) + initWaitSnapshotStateInFSM(state, fsm, info) return &FSM{ fsm: fsm, @@ -290,3 +316,15 @@ func (f *FSM) ChangeSyncPeer(p peer.Peer) (Async, error) { err := f.fsm.Fire(ChangeSyncPeerEvent, asyncRes, p) return *asyncRes, err } + +func (f *FSM) BlockSnapshot(p peer.Peer, blockID proto.BlockID, snapshots proto.BlockSnapshot) (Async, error) { + asyncRes := &Async{} + err := f.fsm.Fire(BlockSnapshotEvent, asyncRes, p, blockID, snapshots) + return *asyncRes, err +} + +func (f *FSM) MicroBlockSnapshot(p peer.Peer, blockID proto.BlockID, snapshots proto.BlockSnapshot) (Async, error) { + asyncRes := &Async{} + err := f.fsm.Fire(MicroBlockSnapshotEvent, asyncRes, p, blockID, snapshots) + return *asyncRes, err +} diff --git a/pkg/node/fsm/fsm_common.go b/pkg/node/fsm/fsm_common.go index bb842e11b..29f9c793c 100644 --- a/pkg/node/fsm/fsm_common.go +++ b/pkg/node/fsm/fsm_common.go 
@@ -66,6 +66,16 @@ func eventArgsTypes(event stateless.Trigger) []reflect.Type { reflect.TypeOf(&Async{}), reflect.TypeOf((*peer.Peer)(nil)).Elem(), reflect.TypeOf((*proto.Transaction)(nil)).Elem(), } + case BlockSnapshotEvent: + return []reflect.Type{ + reflect.TypeOf(&Async{}), reflect.TypeOf((*peer.Peer)(nil)).Elem(), reflect.TypeOf(proto.BlockID{}), + reflect.TypeOf(proto.BlockSnapshot{}), + } + case MicroBlockSnapshotEvent: + return []reflect.Type{ + reflect.TypeOf(&Async{}), reflect.TypeOf((*peer.Peer)(nil)).Elem(), reflect.TypeOf(proto.BlockID{}), + reflect.TypeOf(proto.BlockSnapshot{}), + } default: return nil } @@ -77,7 +87,11 @@ func syncWithNewPeer(state State, baseInfo BaseInfo, p peer.Peer) (State, Async, if err != nil { return state, nil, err } - internal := sync_internal.InternalFromLastSignatures(extension.NewPeerExtension(p, baseInfo.scheme), lastSignatures) + internal := sync_internal.InternalFromLastSignatures( + extension.NewPeerExtension(p, baseInfo.scheme), + lastSignatures, + baseInfo.enableLightMode, + ) c := conf{ peerSyncWith: p, timeout: defaultSyncTimeout, @@ -200,3 +214,59 @@ func validateEventArgs(event stateless.Trigger, args ...interface{}) { } } } + +func broadcastMicroBlockInv(info BaseInfo, inv *proto.MicroBlockInv) error { + invBts, err := inv.MarshalBinary() + if err != nil { + return errors.Wrapf(err, "failed to marshal binary '%T'", inv) + } + var ( + cnt int + msg = &proto.MicroBlockInvMessage{ + Body: invBts, + } + ) + info.peers.EachConnected(func(p peer.Peer, _ *proto.Score) { + p.SendMessage(msg) + cnt++ + }) + info.invRequester.Add2Cache(inv.TotalBlockID.Bytes()) // prevent further unnecessary microblock request + zap.S().Named(logging.FSMNamespace).Debugf("Network message '%T' sent to %d peers: blockID='%s', ref='%s'", + msg, cnt, inv.TotalBlockID, inv.Reference, + ) + return nil +} + +func processScoreAfterApplyingOrReturnToNG( + state State, + baseInfo BaseInfo, + scores []ReceivedScore, + cache blockStatesCache, +) 
(State, Async, error) { + for _, s := range scores { + if err := baseInfo.peers.UpdateScore(s.Peer, s.Score); err != nil { + zap.S().Named(logging.FSMNamespace).Debugf("Error: %v", proto.NewInfoMsg(err)) + continue + } + nodeScore, err := baseInfo.storage.CurrentScore() + if err != nil { + zap.S().Named(logging.FSMNamespace).Debugf("Error: %v", proto.NewInfoMsg(err)) + continue + } + if s.Score.Cmp(nodeScore) == 1 { + // received score is larger than local score + newS, task, errS := syncWithNewPeer(state, baseInfo, s.Peer) + if errS != nil { + zap.S().Errorf("%v", state.Errorf(errS)) + continue + } + if newSName := newS.String(); newSName != SyncStateName { // sanity check + return newS, task, errors.Errorf("unexpected state %q after sync with peer, want %q", + newSName, SyncStateName, + ) + } + return newS, task, nil + } + } + return newNGStateWithCache(baseInfo, cache), nil, nil +} diff --git a/pkg/node/fsm/halt_state.go b/pkg/node/fsm/halt_state.go index bb9ffb84f..2d2722a76 100644 --- a/pkg/node/fsm/halt_state.go +++ b/pkg/node/fsm/halt_state.go @@ -54,7 +54,11 @@ func initHaltStateInFSM(_ *StateData, fsm *stateless.StateMachine, info BaseInfo proto.ContentIDPBBlock, proto.ContentIDPBMicroBlock, proto.ContentIDPBTransaction, - proto.ContentIDGetBlockIds, + proto.ContentIDGetBlockIDs, + proto.ContentIDBlockSnapshot, + proto.ContentIDGetBlockSnapshot, + proto.ContentIDMicroBlockSnapshot, + proto.ContentIDMicroBlockSnapshotRequest, } fsm.Configure(HaltStateName). OnEntry(func(ctx context.Context, args ...interface{}) error { @@ -73,5 +77,7 @@ func initHaltStateInFSM(_ *StateData, fsm *stateless.StateMachine, info BaseInfo Ignore(StartMiningEvent). Ignore(ChangeSyncPeerEvent). Ignore(StopMiningEvent). - Ignore(HaltEvent) + Ignore(HaltEvent). + Ignore(BlockSnapshotEvent). 
+ Ignore(MicroBlockSnapshotEvent) } diff --git a/pkg/node/fsm/idle_state.go b/pkg/node/fsm/idle_state.go index ee3c06283..c915419a1 100644 --- a/pkg/node/fsm/idle_state.go +++ b/pkg/node/fsm/idle_state.go @@ -68,6 +68,8 @@ func (a *IdleState) Task(task tasks.AsyncTask) (State, Async, error) { return a, nil, nil case tasks.MineMicro: // Do nothing return a, nil, nil + case tasks.SnapshotTimeout: + return a, nil, nil default: return a, nil, a.Errorf(errors.Errorf( "unexpected internal task '%d' with data '%+v' received by %s State", @@ -108,7 +110,10 @@ func initIdleStateInFSM(state *StateData, fsm *stateless.StateMachine, b BaseInf proto.ContentIDPBBlock, proto.ContentIDPBMicroBlock, proto.ContentIDPBTransaction, - proto.ContentIDBlockIds, + proto.ContentIDBlockIDs, + proto.ContentIDBlockSnapshot, + proto.ContentIDMicroBlockSnapshot, + proto.ContentIDMicroBlockSnapshotRequest, } fsm.Configure(IdleStateName). OnEntry(func(ctx context.Context, args ...interface{}) error { @@ -122,6 +127,8 @@ func initIdleStateInFSM(state *StateData, fsm *stateless.StateMachine, b BaseInf Ignore(StopSyncEvent). Ignore(ChangeSyncPeerEvent). Ignore(StopMiningEvent). + Ignore(BlockSnapshotEvent). + Ignore(MicroBlockSnapshotEvent). 
PermitDynamic(StartMiningEvent, createPermitDynamicCallback(StartMiningEvent, state, func(args ...interface{}) (State, Async, error) { a, ok := state.State.(*IdleState) diff --git a/pkg/node/fsm/ng/inv_request.go b/pkg/node/fsm/ng/inv_request.go index b79b8b0a1..c9e366dd7 100644 --- a/pkg/node/fsm/ng/inv_request.go +++ b/pkg/node/fsm/ng/inv_request.go @@ -25,8 +25,8 @@ func (a *InvRequesterImpl) Add2Cache(id []byte) (existed bool) { return false } -func (a *InvRequesterImpl) Request(p types.MessageSender, id []byte) (existed bool) { - existed = a.Add2Cache(id) +func (a *InvRequesterImpl) Request(p types.MessageSender, id []byte) bool { + existed := a.Add2Cache(id) if !existed { p.SendMessage(&proto.MicroBlockRequestMessage{ TotalBlockSig: id, diff --git a/pkg/node/fsm/ng/inv_request_test.go b/pkg/node/fsm/ng/inv_request_test.go index 0e9ee246b..951896cbc 100644 --- a/pkg/node/fsm/ng/inv_request_test.go +++ b/pkg/node/fsm/ng/inv_request_test.go @@ -26,5 +26,4 @@ func TestInvRequesterImpl_Request(t *testing.T) { n.Request(buf, proto.NewBlockIDFromSignature(crypto.Signature{}).Bytes()) require.Equal(t, 1, len(buf.messages)) - } diff --git a/pkg/node/fsm/ng_state.go b/pkg/node/fsm/ng_state.go index 6feeddded..610866af2 100644 --- a/pkg/node/fsm/ng_state.go +++ b/pkg/node/fsm/ng_state.go @@ -12,6 +12,7 @@ import ( "github.com/wavesplatform/gowaves/pkg/miner" "github.com/wavesplatform/gowaves/pkg/node/fsm/tasks" "github.com/wavesplatform/gowaves/pkg/p2p/peer" + "github.com/wavesplatform/gowaves/pkg/p2p/peer/extension" "github.com/wavesplatform/gowaves/pkg/proto" "github.com/wavesplatform/gowaves/pkg/state" ) @@ -25,7 +26,15 @@ func newNGState(baseInfo BaseInfo) State { baseInfo.syncPeer.Clear() return &NGState{ baseInfo: baseInfo, - blocksCache: blockStatesCache{blockStates: map[proto.BlockID]proto.Block{}}, + blocksCache: newBlockStatesCache(), + } +} + +func newNGStateWithCache(baseInfo BaseInfo, cache blockStatesCache) State { + baseInfo.syncPeer.Clear() + return 
&NGState{ + baseInfo: baseInfo, + blocksCache: cache, } } @@ -60,6 +69,8 @@ func (a *NGState) Task(task tasks.AsyncTask) (State, Async, error) { "unexpected type %T, expected 'tasks.MineMicroTaskData'", task.Data)) } return a.mineMicro(t.Block, t.Limits, t.KeyPair, t.Vrf) + case tasks.SnapshotTimeout: + return a, nil, nil default: return a, nil, a.Errorf(errors.Errorf( "unexpected internal task '%d' with data '%+v' received by %s State", @@ -90,7 +101,42 @@ func (a *NGState) rollbackToStateFromCache(blockFromCache *proto.Block) error { return errors.Wrapf(err, "failed to rollback to parent block '%s' of cached block '%s'", previousBlockID.String(), blockFromCache.ID.String()) } - _, err = a.baseInfo.blocksApplier.Apply(a.baseInfo.storage, []*proto.Block{blockFromCache}) + _, err = a.baseInfo.blocksApplier.Apply( + a.baseInfo.storage, + []*proto.Block{blockFromCache}, + ) + if err != nil { + return errors.Wrapf(err, "failed to apply cached block %q", blockFromCache.ID.String()) + } + return nil +} + +func (a *NGState) rollbackToStateFromCacheInLightNode(parentID proto.BlockID) error { + blockFromCache, okB := a.blocksCache.Get(parentID) + snapshotFromCache, okS := a.blocksCache.GetSnapshot(parentID) + if !okB && !okS { + // no blocks in cache + return nil + } + if !okS || !okB { + if !okS { + return a.Errorf(errors.Errorf("snapshot for block %s doesn't exist in cache", parentID.String())) + } + return a.Errorf(errors.Errorf("block %s doesn't exist in cache", parentID.String())) + } + zap.S().Named(logging.FSMNamespace).Debugf("[%s] Re-applying block '%s' from cache", + a, blockFromCache.ID.String()) + previousBlockID := blockFromCache.Parent + err := a.baseInfo.storage.RollbackTo(previousBlockID) + if err != nil { + return errors.Wrapf(err, "failed to rollback to parent block '%s' of cached block '%s'", + previousBlockID.String(), blockFromCache.ID.String()) + } + _, err = a.baseInfo.blocksApplier.ApplyWithSnapshots( + a.baseInfo.storage, + 
[]*proto.Block{blockFromCache}, + []*proto.BlockSnapshot{snapshotFromCache}, + ) if err != nil { return errors.Wrapf(err, "failed to apply cached block %q", blockFromCache.ID.String()) } @@ -114,28 +160,38 @@ func (a *NGState) Block(peer peer.Peer, block *proto.Block) (State, Async, error "[%s] Key-block '%s' has parent '%s' which is not the top block '%s'", a, block.ID.String(), block.Parent.String(), top.ID.String(), ) - var blockFromCache *proto.Block - if blockFromCache, ok = a.blocksCache.Get(block.Parent); ok { - zap.S().Named(logging.FSMNamespace).Debugf("[%s] Re-applying block '%s' from cache", - a, blockFromCache.ID.String()) - if err = a.rollbackToStateFromCache(blockFromCache); err != nil { + if a.baseInfo.enableLightMode { + if err = a.rollbackToStateFromCacheInLightNode(block.Parent); err != nil { return a, nil, a.Errorf(err) } + } else { + if blockFromCache, okGet := a.blocksCache.Get(block.Parent); okGet { + zap.S().Named(logging.FSMNamespace).Debugf("[%s] Re-applying block '%s' from cache", + a, blockFromCache.ID.String()) + if err = a.rollbackToStateFromCache(blockFromCache); err != nil { + return a, nil, a.Errorf(err) + } + } } } - _, err = a.baseInfo.blocksApplier.Apply(a.baseInfo.storage, []*proto.Block{block}) + if a.baseInfo.enableLightMode { + defer func() { + pe := extension.NewPeerExtension(peer, a.baseInfo.scheme) + pe.AskBlockSnapshot(block.BlockID()) + }() + st, timeoutTask := newWaitSnapshotState(a.baseInfo, block, a.blocksCache) + return st, tasks.Tasks(timeoutTask), nil + } + _, err = a.baseInfo.blocksApplier.Apply( + a.baseInfo.storage, + []*proto.Block{block}, + ) if err != nil { - metrics.FSMKeyBlockDeclined("ng", block, err) - return a, nil, a.Errorf(errors.Wrapf(err, "peer '%s'", peer.ID())) + return a, nil, a.Errorf(errors.Wrapf(err, "failed to apply block %s", block.BlockID())) } - metrics.FSMKeyBlockApplied("ng", block) - zap.S().Named(logging.FSMNamespace).Debugf("[%s] Handle received key block message: block '%s' applied to 
state", - a, block.BlockID()) - a.blocksCache.Clear() a.blocksCache.AddBlockState(block) - a.baseInfo.scheduler.Reschedule() a.baseInfo.actions.SendScore(a.baseInfo.storage) a.baseInfo.CleanUtx() @@ -149,7 +205,10 @@ func (a *NGState) MinedBlock( metrics.FSMKeyBlockGenerated("ng", block) err := a.baseInfo.storage.Map(func(state state.NonThreadSafeState) error { var err error - _, err = a.baseInfo.blocksApplier.Apply(state, []*proto.Block{block}) + _, err = a.baseInfo.blocksApplier.Apply( + state, + []*proto.Block{block}, + ) return err }) if err != nil { @@ -162,39 +221,37 @@ func (a *NGState) MinedBlock( a.blocksCache.Clear() a.blocksCache.AddBlockState(block) - a.baseInfo.scheduler.Reschedule() a.baseInfo.actions.SendBlock(block) a.baseInfo.actions.SendScore(a.baseInfo.storage) a.baseInfo.CleanUtx() - a.blocksCache = blockStatesCache{blockStates: map[proto.BlockID]proto.Block{}} + a.blocksCache = newBlockStatesCache() return a, tasks.Tasks(tasks.NewMineMicroTask(0, block, limits, keyPair, vrf)), nil } func (a *NGState) MicroBlock(p peer.Peer, micro *proto.MicroBlock) (State, Async, error) { metrics.FSMMicroBlockReceived("ng", micro, p.Handshake().NodeName) - block, err := a.checkAndAppendMicroblock(micro) // the TopBlock() is used here - if err != nil { - metrics.FSMMicroBlockDeclined("ng", micro, err) - return a, nil, a.Errorf(err) - } - zap.S().Named(logging.FSMNamespace).Debugf( - "[%s] Received microblock '%s' (referencing '%s') successfully applied to state", - a, block.BlockID(), micro.Reference, - ) - a.baseInfo.MicroBlockCache.Add(block.BlockID(), micro) - a.blocksCache.AddBlockState(block) - a.baseInfo.scheduler.Reschedule() - - // Notify all connected peers about new microblock, send them microblock inv network message - if inv, ok := a.baseInfo.MicroBlockInvCache.Get(block.BlockID()); ok { - //TODO: We have to exclude from recipients peers that already have this microblock - if err = a.broadcastMicroBlockInv(inv); err != nil { - return a, nil, 
a.Errorf(errors.Wrap(err, "failed to handle microblock message")) + if !a.baseInfo.enableLightMode { + block, err := a.checkAndAppendMicroBlock(micro, nil) // the TopBlock() is used here + if err != nil { + metrics.FSMMicroBlockDeclined("ng", micro, err) + return a, nil, a.Errorf(err) } + zap.S().Named(logging.FSMNamespace).Debugf( + "[%s] Received microblock '%s' (referencing '%s') successfully applied to state", + a, block.BlockID(), micro.Reference, + ) + a.baseInfo.MicroBlockCache.AddMicroBlock(block.BlockID(), micro) + a.blocksCache.AddBlockState(block) + return a, nil, nil } - return a, nil, nil + defer func() { + pe := extension.NewPeerExtension(p, a.baseInfo.scheme) + pe.AskMicroBlockSnapshot(micro.TotalBlockID) + }() + st, timeoutTask := newWaitMicroSnapshotState(a.baseInfo, micro, a.blocksCache) + return st, tasks.Tasks(timeoutTask), nil } // mineMicro handles a new microblock generated by miner. @@ -235,40 +292,21 @@ func (a *NGState) mineMicro( return a, nil, a.Errorf(err) } - if err = a.broadcastMicroBlockInv(inv); err != nil { + if err = broadcastMicroBlockInv(a.baseInfo, inv); err != nil { return a, nil, a.Errorf(errors.Wrap(err, "failed to broadcast generated microblock")) } - a.baseInfo.MicroBlockCache.Add(block.BlockID(), micro) + a.baseInfo.MicroBlockCache.AddMicroBlock(block.BlockID(), micro) a.baseInfo.MicroBlockInvCache.Add(block.BlockID(), inv) return a, tasks.Tasks(tasks.NewMineMicroTask(a.baseInfo.microblockInterval, block, rest, keyPair, vrf)), nil } -func (a *NGState) broadcastMicroBlockInv(inv *proto.MicroBlockInv) error { - invBts, err := inv.MarshalBinary() - if err != nil { - return errors.Wrapf(err, "failed to marshal binary '%T'", inv) - } - var ( - cnt int - msg = &proto.MicroBlockInvMessage{ - Body: invBts, - } - ) - a.baseInfo.peers.EachConnected(func(p peer.Peer, score *proto.Score) { - p.SendMessage(msg) - cnt++ - }) - a.baseInfo.invRequester.Add2Cache(inv.TotalBlockID.Bytes()) // prevent further unnecessary microblock request 
- zap.S().Named(logging.FSMNamespace).Debugf("Network message '%T' sent to %d peers: blockID='%s', ref='%s'", - msg, cnt, inv.TotalBlockID, inv.Reference, - ) - return nil -} - -// checkAndAppendMicroblock checks that microblock is appendable and appends it. -func (a *NGState) checkAndAppendMicroblock(micro *proto.MicroBlock) (*proto.Block, error) { +// checkAndAppendMicroBlock checks that microblock is appendable and appends it. +func (a *NGState) checkAndAppendMicroBlock( + micro *proto.MicroBlock, + snapshot *proto.BlockSnapshot, +) (*proto.Block, error) { top := a.baseInfo.storage.TopBlock() // Get the last block if top.BlockID() != micro.Reference { // Microblock doesn't refer to last block err := errors.Errorf("microblock TBID '%s' refer to block ID '%s' but last block ID is '%s'", @@ -301,10 +339,31 @@ func (a *NGState) checkAndAppendMicroblock(micro *proto.MicroBlock) (*proto.Bloc if err != nil { return nil, errors.Wrap(err, "NGState microBlockByID: failed generate block id") } - err = a.baseInfo.storage.Map(func(state state.State) error { - _, er := a.baseInfo.blocksApplier.ApplyMicro(state, newBlock) - return er - }) + snapshotsToApply := snapshot + if snapshot != nil { + h, errBToH := a.baseInfo.storage.BlockIDToHeight(top.BlockID()) + if errBToH != nil { + return nil, errBToH + } + topBlockSnapshots, errSAtH := a.baseInfo.storage.SnapshotsAtHeight(h) + if errSAtH != nil { + return nil, errSAtH + } + + topBlockSnapshots.AppendTxSnapshots(snapshot.TxSnapshots) + + snapshotsToApply = &topBlockSnapshots + err = a.baseInfo.storage.Map(func(state state.State) error { + _, er := a.baseInfo.blocksApplier.ApplyMicroWithSnapshots(state, newBlock, snapshotsToApply) + return er + }) + } else { + err = a.baseInfo.storage.Map(func(state state.State) error { + _, er := a.baseInfo.blocksApplier.ApplyMicro(state, newBlock) + return er + }) + } + if err != nil { metrics.FSMMicroBlockDeclined("ng", micro, err) return nil, errors.Wrap(err, "failed to apply created from 
micro block") @@ -315,7 +374,7 @@ func (a *NGState) checkAndAppendMicroblock(micro *proto.MicroBlock) (*proto.Bloc func (a *NGState) MicroBlockInv(p peer.Peer, inv *proto.MicroBlockInv) (State, Async, error) { metrics.MicroBlockInv(inv, p.Handshake().NodeName) - existed := a.baseInfo.invRequester.Request(p, inv.TotalBlockID.Bytes()) // TODO: add logs about microblock request + existed := a.baseInfo.invRequester.Request(p, inv.TotalBlockID.Bytes()) if existed { zap.S().Named(logging.FSMNamespace).Debugf("[%s] Microblock inv received: block '%s' already in cache", a, inv.TotalBlockID) @@ -333,6 +392,14 @@ func (a *NGState) Halt() (State, Async, error) { type blockStatesCache struct { blockStates map[proto.BlockID]proto.Block + snapshots map[proto.BlockID]proto.BlockSnapshot +} + +func newBlockStatesCache() blockStatesCache { + return blockStatesCache{ + blockStates: map[proto.BlockID]proto.Block{}, + snapshots: map[proto.BlockID]proto.BlockSnapshot{}, + } } func (c *blockStatesCache) AddBlockState(block *proto.Block) { @@ -341,8 +408,15 @@ func (c *blockStatesCache) AddBlockState(block *proto.Block) { block.ID.String(), len(c.blockStates)) } +func (c *blockStatesCache) AddSnapshot(blockID proto.BlockID, snapshot proto.BlockSnapshot) { + c.snapshots[blockID] = snapshot + zap.S().Named(logging.FSMNamespace).Debugf("[NG] Snapshot '%s' added to cache, total snapshots in cache: %d", + blockID.String(), len(c.snapshots)) +} + func (c *blockStatesCache) Clear() { c.blockStates = map[proto.BlockID]proto.Block{} + c.snapshots = map[proto.BlockID]proto.BlockSnapshot{} zap.S().Named(logging.FSMNamespace).Debug("[NG] Block cache is empty") } @@ -354,8 +428,19 @@ func (c *blockStatesCache) Get(blockID proto.BlockID) (*proto.Block, bool) { return &block, true } +func (c *blockStatesCache) GetSnapshot(blockID proto.BlockID) (*proto.BlockSnapshot, bool) { + snapshot, ok := c.snapshots[blockID] + if !ok { + return nil, false + } + return &snapshot, true +} + func 
initNGStateInFSM(state *StateData, fsm *stateless.StateMachine, info BaseInfo) { - var ngSkipMessageList proto.PeerMessageIDs + var ngSkipMessageList = proto.PeerMessageIDs{ + proto.ContentIDMicroBlockSnapshot, + proto.ContentIDBlockSnapshot, + } fsm.Configure(NGStateName). OnEntry(func(ctx context.Context, args ...interface{}) error { info.skipMessageList.SetList(ngSkipMessageList) @@ -365,6 +450,8 @@ func initNGStateInFSM(state *StateData, fsm *stateless.StateMachine, info BaseIn Ignore(StartMiningEvent). Ignore(ChangeSyncPeerEvent). Ignore(StopSyncEvent). + Ignore(BlockSnapshotEvent). + Ignore(MicroBlockSnapshotEvent). PermitDynamic(StopMiningEvent, createPermitDynamicCallback(StopMiningEvent, state, func(args ...interface{}) (State, Async, error) { a, ok := state.State.(*NGState) diff --git a/pkg/node/fsm/persist_state.go b/pkg/node/fsm/persist_state.go index f3bb471bf..ca1162575 100644 --- a/pkg/node/fsm/persist_state.go +++ b/pkg/node/fsm/persist_state.go @@ -76,7 +76,11 @@ func initPersistStateInFSM(state *StateData, fsm *stateless.StateMachine, info B proto.ContentIDPBBlock, proto.ContentIDPBMicroBlock, proto.ContentIDPBTransaction, - proto.ContentIDGetBlockIds, + proto.ContentIDGetBlockIDs, + proto.ContentIDBlockSnapshot, + proto.ContentIDMicroBlockSnapshot, + proto.ContentIDGetBlockSnapshot, + proto.ContentIDMicroBlockSnapshotRequest, } fsm.Configure(PersistStateName). Ignore(BlockEvent). @@ -88,6 +92,8 @@ func initPersistStateInFSM(state *StateData, fsm *stateless.StateMachine, info B Ignore(StartMiningEvent). Ignore(ChangeSyncPeerEvent). Ignore(StopSyncEvent). + Ignore(BlockSnapshotEvent). + Ignore(MicroBlockSnapshotEvent). 
OnEntry(func(ctx context.Context, args ...interface{}) error { info.skipMessageList.SetList(persistSkipMessageList) return nil diff --git a/pkg/node/fsm/sync_internal/internal.go b/pkg/node/fsm/sync_internal/internal.go index 80a713ca5..79c09a3a0 100644 --- a/pkg/node/fsm/sync_internal/internal.go +++ b/pkg/node/fsm/sync_internal/internal.go @@ -10,6 +10,7 @@ import ( ) type Blocks []*proto.Block +type Snapshots []*proto.BlockSnapshot type Eof = bool type BlockApplied bool @@ -19,24 +20,36 @@ var UnexpectedBlockErr = proto.NewInfoMsg(errors.New("unexpected block")) type PeerExtension interface { AskBlocksIDs(id []proto.BlockID) AskBlock(id proto.BlockID) + AskBlockSnapshot(id proto.BlockID) } type Internal struct { respondedSignatures *signatures.BlockIDs orderedBlocks *ordered_blocks.OrderedBlocks waitingForSignatures bool + isLightNode bool } -func InternalFromLastSignatures(p extension.PeerExtension, signatures *signatures.ReverseOrdering) Internal { +func InternalFromLastSignatures( + p extension.PeerExtension, + signatures *signatures.ReverseOrdering, + isLightNode bool, +) Internal { p.AskBlocksIDs(signatures.BlockIDS()) - return NewInternal(ordered_blocks.NewOrderedBlocks(), signatures, true) + return NewInternal(ordered_blocks.NewOrderedBlocks(), signatures, true, isLightNode) } -func NewInternal(orderedBlocks *ordered_blocks.OrderedBlocks, respondedSignatures *signatures.ReverseOrdering, waitingForSignatures bool) Internal { +func NewInternal( + orderedBlocks *ordered_blocks.OrderedBlocks, + respondedSignatures *signatures.ReverseOrdering, + waitingForSignatures bool, + isLightNode bool, +) Internal { return Internal{ respondedSignatures: respondedSignatures, orderedBlocks: orderedBlocks, waitingForSignatures: waitingForSignatures, + isLightNode: isLightNode, } } @@ -52,10 +65,13 @@ func (a Internal) BlockIDs(p PeerExtension, ids []proto.BlockID) (Internal, erro newIDs = append(newIDs, id) if a.orderedBlocks.Add(id) { p.AskBlock(id) + if a.isLightNode { + 
p.AskBlockSnapshot(id) + } } } respondedSignatures := signatures.NewSignatures(newIDs...).Revert() - return NewInternal(a.orderedBlocks, respondedSignatures, false), nil + return NewInternal(a.orderedBlocks, respondedSignatures, false, a.isLightNode), nil } func (a Internal) WaitingForSignatures() bool { @@ -70,26 +86,39 @@ func (a Internal) Block(block *proto.Block) (Internal, error) { return a, nil } +func (a Internal) SetSnapshot(blockID proto.BlockID, snapshot *proto.BlockSnapshot) (Internal, error) { + if !a.orderedBlocks.Contains(blockID) { + return a, UnexpectedBlockErr + } + a.orderedBlocks.SetSnapshot(blockID, snapshot) + return a, nil +} + type peerExtension interface { AskBlocksIDs(id []proto.BlockID) } -func (a Internal) Blocks(p peerExtension) (Internal, Blocks, Eof) { +func (a Internal) Blocks() (Internal, Blocks, Snapshots, Eof) { if a.waitingForSignatures { - return NewInternal(a.orderedBlocks, a.respondedSignatures, a.waitingForSignatures), nil, false + return NewInternal(a.orderedBlocks, a.respondedSignatures, a.waitingForSignatures, a.isLightNode), nil, nil, false } - if a.orderedBlocks.RequestedCount() > a.orderedBlocks.ReceivedCount() { - return NewInternal(a.orderedBlocks, a.respondedSignatures, a.waitingForSignatures), nil, false + if a.orderedBlocks.RequestedCount() > a.orderedBlocks.ReceivedCount(a.isLightNode) { + return NewInternal(a.orderedBlocks, a.respondedSignatures, a.waitingForSignatures, a.isLightNode), nil, nil, false } if a.orderedBlocks.RequestedCount() < 100 { - return NewInternal(a.orderedBlocks, a.respondedSignatures, false), a.orderedBlocks.PopAll(), true + bs, ss := a.orderedBlocks.PopAll(a.isLightNode) + return NewInternal(a.orderedBlocks, a.respondedSignatures, false, a.isLightNode), bs, ss, true } + bs, ss := a.orderedBlocks.PopAll(a.isLightNode) + return NewInternal(a.orderedBlocks, a.respondedSignatures, true, a.isLightNode), bs, ss, false +} + +func (a Internal) AskBlocksIDs(p peerExtension) { 
p.AskBlocksIDs(a.respondedSignatures.BlockIDS()) - return NewInternal(a.orderedBlocks, a.respondedSignatures, true), a.orderedBlocks.PopAll(), false } func (a Internal) AvailableCount() int { - return a.orderedBlocks.ReceivedCount() + return a.orderedBlocks.ReceivedCount(a.isLightNode) } func (a Internal) RequestedCount() int { diff --git a/pkg/node/fsm/sync_internal/internal_test.go b/pkg/node/fsm/sync_internal/internal_test.go index 91c9845d1..3c9546017 100644 --- a/pkg/node/fsm/sync_internal/internal_test.go +++ b/pkg/node/fsm/sync_internal/internal_test.go @@ -15,10 +15,14 @@ import ( type noopWrapper struct { } -func (noopWrapper) AskBlocksIDs(id []proto.BlockID) { +func (noopWrapper) AskBlocksIDs(_ []proto.BlockID) { } -func (noopWrapper) AskBlock(id proto.BlockID) { +func (noopWrapper) AskBlock(_ proto.BlockID) { +} + +func (noopWrapper) AskBlockSnapshot(_ proto.BlockID) { + } var sig1 = crypto.MustSignatureFromBase58("5syuWANDSgk8KyPxq2yQs2CYV23QfnrBoZMSv2LaciycxDYfBw6cLA2SqVnonnh1nFiFumzTgy2cPETnE7ZaZg5P") @@ -37,14 +41,14 @@ func TestSigFSM_Signatures(t *testing.T) { sigs := signatures.NewSignatures() t.Run("error on receive unexpected signatures", func(t *testing.T) { - fsm := NewInternal(or, sigs, false) + fsm := NewInternal(or, sigs, false, false) rs2, err := fsm.BlockIDs(nil, blocksFromSigs(sig1, sig2)) require.Equal(t, NoSignaturesExpectedErr, err) require.NotNil(t, rs2) }) t.Run("successful receive signatures", func(t *testing.T) { - fsm := NewInternal(or, sigs, true) + fsm := NewInternal(or, sigs, true, false) rs2, err := fsm.BlockIDs(noopWrapper{}, blocksFromSigs(sig1, sig2)) require.NoError(t, err) require.NotNil(t, rs2) @@ -63,7 +67,7 @@ func block(sig crypto.Signature) *proto.Block { func TestSigFSM_Block(t *testing.T) { or := ordered_blocks.NewOrderedBlocks() sigs := signatures.NewSignatures() - fsm := NewInternal(or, sigs, true) + fsm := NewInternal(or, sigs, true, false) fsm, _ = fsm.BlockIDs(noopWrapper{}, blocksFromSigs(sig1, sig2)) fsm, _ 
= fsm.Block(block(sig1)) @@ -71,13 +75,13 @@ func TestSigFSM_Block(t *testing.T) { require.Equal(t, 2, fsm.AvailableCount()) // no panic, cause `nearEnd` is True - _, blocks, _ := fsm.Blocks(nil) + _, blocks, _, _ := fsm.Blocks() require.Equal(t, 2, len(blocks)) } func TestSigFSM_BlockGetSignatures(t *testing.T) { or := ordered_blocks.NewOrderedBlocks() sigs := signatures.NewSignatures() - _, bs, _ := NewInternal(or, sigs, false).Blocks(nil) + _, bs, _, _ := NewInternal(or, sigs, false, false).Blocks() require.Nil(t, bs) } diff --git a/pkg/node/fsm/sync_state.go b/pkg/node/fsm/sync_state.go index 1fc480843..4643c8e71 100644 --- a/pkg/node/fsm/sync_state.go +++ b/pkg/node/fsm/sync_state.go @@ -38,12 +38,6 @@ func (c conf) Now(tm types.Time) conf { } } -type noopWrapper struct{} - -func (noopWrapper) AskBlocksIDs([]proto.BlockID) {} - -func (noopWrapper) AskBlock(proto.BlockID) {} - type SyncState struct { baseInfo BaseInfo conf conf @@ -71,12 +65,22 @@ func (a *SyncState) Transaction(p peer.Peer, t proto.Transaction) (State, Async, } func (a *SyncState) StopSync() (State, Async, error) { - _, blocks, _ := a.internal.Blocks(noopWrapper{}) + _, blocks, snapshots, _ := a.internal.Blocks() if len(blocks) > 0 { - err := a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { - _, err := a.baseInfo.blocksApplier.Apply(s, blocks) - return err - }) + var err error + if a.baseInfo.enableLightMode { + err = a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { + var errApply error + _, errApply = a.baseInfo.blocksApplier.ApplyWithSnapshots(s, blocks, snapshots) + return errApply + }) + } else { + err = a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { + var errApply error + _, errApply = a.baseInfo.blocksApplier.Apply(s, blocks) + return errApply + }) + } return newIdleState(a.baseInfo), nil, a.Errorf(err) } return newIdleState(a.baseInfo), nil, nil @@ -105,6 +109,8 @@ func (a *SyncState) Task(task tasks.AsyncTask) (State, Async, error) { 
return a, nil, nil case tasks.MineMicro: return a, nil, nil + case tasks.SnapshotTimeout: + return a, nil, nil default: return a, nil, a.Errorf(errors.Errorf( "unexpected internal task '%d' with data '%+v' received by %s State", @@ -163,11 +169,28 @@ func (a *SyncState) Block(p peer.Peer, block *proto.Block) (State, Async, error) } metrics.FSMKeyBlockReceived("sync", block, p.Handshake().NodeName) zap.S().Named(logging.FSMNamespace).Debugf("[Sync][%s] Received block %s", p.ID(), block.ID.String()) + internal, err := a.internal.Block(block) if err != nil { return newSyncState(a.baseInfo, a.conf, internal), nil, a.Errorf(err) } - return a.applyBlocks(a.baseInfo, a.conf.Now(a.baseInfo.tm), internal) + return a.applyBlocksWithSnapshots(a.baseInfo, a.conf.Now(a.baseInfo.tm), internal) +} + +func (a *SyncState) BlockSnapshot( + p peer.Peer, + blockID proto.BlockID, + snapshot proto.BlockSnapshot, +) (State, Async, error) { + if !p.Equal(a.conf.peerSyncWith) { + return a, nil, nil + } + zap.S().Named(logging.FSMNamespace).Debugf("[Sync][%s] Received snapshot for block %s", p.ID(), blockID.String()) + internal, err := a.internal.SetSnapshot(blockID, &snapshot) + if err != nil { + return newSyncState(a.baseInfo, a.conf, internal), nil, a.Errorf(err) + } + return a.applyBlocksWithSnapshots(a.baseInfo, a.conf.Now(a.baseInfo.tm), internal) } func (a *SyncState) MinedBlock( @@ -175,7 +198,10 @@ func (a *SyncState) MinedBlock( ) (State, Async, error) { metrics.FSMKeyBlockGenerated("sync", block) zap.S().Named(logging.FSMNamespace).Infof("[Sync] New block '%s' mined", block.ID.String()) - _, err := a.baseInfo.blocksApplier.Apply(a.baseInfo.storage, []*proto.Block{block}) + _, err := a.baseInfo.blocksApplier.Apply( + a.baseInfo.storage, + []*proto.Block{block}, + ) if err != nil { zap.S().Warnf("[Sync] Failed to apply mined block: %v", err) return a, nil, nil // We've failed to apply mined block, it's not an error @@ -213,19 +239,29 @@ func (a *SyncState) changePeerIfRequired() 
(peer.Peer, bool) { } // TODO suspend peer on state error -func (a *SyncState) applyBlocks( +func (a *SyncState) applyBlocksWithSnapshots( baseInfo BaseInfo, conf conf, internal sync_internal.Internal, ) (State, Async, error) { - internal, blocks, eof := internal.Blocks(extension.NewPeerExtension(a.conf.peerSyncWith, a.baseInfo.scheme)) + internal, blocks, snapshots, eof := internal.Blocks() if len(blocks) == 0 { zap.S().Named(logging.FSMNamespace).Debug("[Sync] No blocks to apply") return newSyncState(baseInfo, conf, internal), nil, nil } - err := a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { - var err error - _, err = a.baseInfo.blocksApplier.Apply(s, blocks) - return err - }) + var err error + if a.baseInfo.enableLightMode { + err = a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { + var errApply error + _, errApply = a.baseInfo.blocksApplier.ApplyWithSnapshots(s, blocks, snapshots) + return errApply + }) + } else { + err = a.baseInfo.storage.Map(func(s state.NonThreadSafeState) error { + var errApply error + _, errApply = a.baseInfo.blocksApplier.Apply(s, blocks) + return errApply + }) + } + if err != nil { if errs.IsValidationError(err) || errs.IsValidationError(errors.Cause(err)) { zap.S().Named(logging.FSMNamespace).Debugf( @@ -260,6 +296,7 @@ func (a *SyncState) applyBlocks( } return newNGState(a.baseInfo), nil, nil } + a.internal.AskBlocksIDs(extension.NewPeerExtension(a.conf.peerSyncWith, a.baseInfo.scheme)) return newSyncState(baseInfo, conf, internal), nil, nil } @@ -272,9 +309,18 @@ func initSyncStateInFSM(state *StateData, fsm *stateless.StateMachine, info Base proto.ContentIDMicroblock, proto.ContentIDPBMicroBlock, proto.ContentIDPBTransaction, + proto.ContentIDMicroBlockSnapshot, + proto.ContentIDMicroBlockSnapshotRequest, + } + if !info.enableLightMode { + syncSkipMessageList = append(syncSkipMessageList, proto.ContentIDBlockSnapshot) } fsm.Configure(SyncStateName). 
- Ignore(MicroBlockEvent).Ignore(MicroBlockInvEvent).Ignore(StartMiningEvent).Ignore(StopMiningEvent). + Ignore(MicroBlockEvent). + Ignore(MicroBlockInvEvent). + Ignore(StartMiningEvent). + Ignore(StopMiningEvent). + Ignore(MicroBlockSnapshotEvent). OnEntry(func(ctx context.Context, args ...interface{}) error { info.skipMessageList.SetList(syncSkipMessageList) return nil @@ -361,5 +407,18 @@ func initSyncStateInFSM(state *StateData, fsm *stateless.StateMachine, info Base "unexpected type '%T' expected '*SyncState'", state.State)) } return a.Halt() + })). + PermitDynamic(BlockSnapshotEvent, + createPermitDynamicCallback(BlockSnapshotEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*SyncState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*SyncState'", state.State)) + } + return a.BlockSnapshot( + convertToInterface[peer.Peer](args[0]), + args[1].(proto.BlockID), + args[2].(proto.BlockSnapshot), + ) })) } diff --git a/pkg/node/fsm/tasks/tasks.go b/pkg/node/fsm/tasks/tasks.go index 1a08caa72..37b223c8a 100644 --- a/pkg/node/fsm/tasks/tasks.go +++ b/pkg/node/fsm/tasks/tasks.go @@ -15,6 +15,7 @@ const ( AskPeers MineMicro PersistComplete + SnapshotTimeout ) // SendAsyncTask sends task into channel with overflow check. 
@@ -173,3 +174,78 @@ func NewFuncTask(f func(ctx context.Context, output chan AsyncTask) error, taskT _type: taskType, } } + +type SnapshotTimeoutTaskType int + +const ( + BlockSnapshot SnapshotTimeoutTaskType = iota + 1 + MicroBlockSnapshot +) + +type SnapshotTimeoutTaskData struct { + BlockID proto.BlockID + SnapshotTaskType SnapshotTimeoutTaskType +} + +func (SnapshotTimeoutTaskData) taskDataMarker() {} + +type SnapshotTimeoutTask struct { + timeout time.Duration + outdated <-chan struct{} + SnapshotTimeoutTaskData SnapshotTimeoutTaskData +} + +func NewBlockSnapshotTimeoutTask( + timeout time.Duration, + blockID proto.BlockID, + outdated <-chan struct{}, +) SnapshotTimeoutTask { + return SnapshotTimeoutTask{ + timeout: timeout, + outdated: outdated, + SnapshotTimeoutTaskData: SnapshotTimeoutTaskData{ + BlockID: blockID, + SnapshotTaskType: BlockSnapshot, + }, + } +} + +func NewMicroBlockSnapshotTimeoutTask( + timeout time.Duration, + blockID proto.BlockID, + outdated <-chan struct{}, +) SnapshotTimeoutTask { + return SnapshotTimeoutTask{ + timeout: timeout, + outdated: outdated, + SnapshotTimeoutTaskData: SnapshotTimeoutTaskData{ + BlockID: blockID, + SnapshotTaskType: MicroBlockSnapshot, + }, + } +} + +func (SnapshotTimeoutTask) Type() int { + return SnapshotTimeout +} + +func (a SnapshotTimeoutTask) Run(ctx context.Context, output chan AsyncTask) error { + t := time.NewTimer(a.timeout) + defer func() { + if !t.Stop() { + <-t.C + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-a.outdated: + return nil + case <-t.C: + SendAsyncTask(output, AsyncTask{ + TaskType: a.Type(), + Data: a.SnapshotTimeoutTaskData, + }) + return nil + } +} diff --git a/pkg/node/fsm/wait_micro_snapshot.go b/pkg/node/fsm/wait_micro_snapshot.go new file mode 100644 index 000000000..5b38b9ccb --- /dev/null +++ b/pkg/node/fsm/wait_micro_snapshot.go @@ -0,0 +1,273 @@ +package fsm + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/qmuntal/stateless" + 
"go.uber.org/zap" + + "github.com/wavesplatform/gowaves/pkg/logging" + "github.com/wavesplatform/gowaves/pkg/metrics" + "github.com/wavesplatform/gowaves/pkg/node/fsm/tasks" + "github.com/wavesplatform/gowaves/pkg/p2p/peer" + "github.com/wavesplatform/gowaves/pkg/proto" + "github.com/wavesplatform/gowaves/pkg/state" +) + +const ( + microSnapshotTimeout = 15 * time.Second + scoresSliceMaxSize = 10000 +) + +type WaitMicroSnapshotState struct { + baseInfo BaseInfo + blocksCache blockStatesCache + timeoutTaskOutdated chan<- struct{} + microBlockWaitingForSnapshot *proto.MicroBlock + + receivedScores []ReceivedScore +} + +func newWaitMicroSnapshotState(baseInfo BaseInfo, micro *proto.MicroBlock, cache blockStatesCache) (State, tasks.Task) { + baseInfo.syncPeer.Clear() + timeoutTaskOutdated := make(chan struct{}) + st := &WaitMicroSnapshotState{ + baseInfo: baseInfo, + blocksCache: cache, + timeoutTaskOutdated: timeoutTaskOutdated, + microBlockWaitingForSnapshot: micro, + } + task := tasks.NewMicroBlockSnapshotTimeoutTask(microSnapshotTimeout, micro.TotalBlockID, timeoutTaskOutdated) + return st, task +} + +func (a *WaitMicroSnapshotState) Errorf(err error) error { + return fsmErrorf(a, err) +} + +func (a *WaitMicroSnapshotState) String() string { + return WaitMicroSnapshotStateName +} + +func (a *WaitMicroSnapshotState) Task(task tasks.AsyncTask) (State, Async, error) { + switch task.TaskType { + case tasks.Ping: + return a, nil, nil + case tasks.AskPeers: + zap.S().Named(logging.FSMNamespace).Debugf("[%s] Requesting peers", a) + a.baseInfo.peers.AskPeers() + return a, nil, nil + case tasks.MineMicro: + return a, nil, nil + case tasks.SnapshotTimeout: + t, ok := task.Data.(tasks.SnapshotTimeoutTaskData) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type %T, expected 'tasks.SnapshotTimeoutTaskData'", task.Data)) + } + switch t.SnapshotTaskType { + case tasks.BlockSnapshot: + return a, nil, nil + case tasks.MicroBlockSnapshot: + defer 
a.cleanupBeforeTransition() + zap.S().Named(logging.FSMNamespace).Errorf("%v", a.Errorf(errors.Errorf( + "Failed to get snapshot for microBlock '%s' - timeout", t.BlockID))) + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) + default: + return a, nil, a.Errorf(errors.New("undefined Snapshot Task type")) + } + default: + return a, nil, a.Errorf(errors.Errorf( + "unexpected internal task '%d' with data '%+v' received by %s State", + task.TaskType, task.Data, a.String())) + } +} + +func (a *WaitMicroSnapshotState) Score(p peer.Peer, score *proto.Score) (State, Async, error) { + metrics.FSMScore("ng", score, p.Handshake().NodeName) + if len(a.receivedScores) < scoresSliceMaxSize { + a.receivedScores = append(a.receivedScores, ReceivedScore{Peer: p, Score: score}) + } + return a, nil, nil +} + +func (a *WaitMicroSnapshotState) MicroBlockSnapshot( + _ peer.Peer, + blockID proto.BlockID, + snapshot proto.BlockSnapshot, +) (State, Async, error) { + if a.microBlockWaitingForSnapshot.TotalBlockID != blockID { + return a, nil, a.Errorf(errors.Errorf( + "New snapshot doesn't match with microBlock %s", a.microBlockWaitingForSnapshot.TotalBlockID)) + } + defer a.cleanupBeforeTransition() + // the TopBlock() is used here + block, err := a.checkAndAppendMicroBlock(a.microBlockWaitingForSnapshot, &snapshot) + if err != nil { + metrics.FSMMicroBlockDeclined("ng", a.microBlockWaitingForSnapshot, err) + zap.S().Errorf("%v", a.Errorf(err)) + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) + } + + zap.S().Named(logging.FSMNamespace).Debugf( + "[%s] Received snapshot for microblock '%s' successfully applied to state", a, block.BlockID(), + ) + a.baseInfo.MicroBlockCache.AddMicroBlockWithSnapshot(block.BlockID(), a.microBlockWaitingForSnapshot, &snapshot) + a.blocksCache.AddBlockState(block) + a.blocksCache.AddSnapshot(block.BlockID(), snapshot) + a.baseInfo.scheduler.Reschedule() + // Notify all 
connected peers about new microblock, send them microblock inv network message + if inv, ok := a.baseInfo.MicroBlockInvCache.Get(block.BlockID()); ok { + //TODO: We have to exclude from recipients peers that already have this microblock + if err = broadcastMicroBlockInv(a.baseInfo, inv); err != nil { + zap.S().Errorf("%v", a.Errorf(errors.Wrap(err, "Failed to handle micro block message"))) + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) + } + } + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) +} + +func (a *WaitMicroSnapshotState) cleanupBeforeTransition() { + a.microBlockWaitingForSnapshot = nil + if a.timeoutTaskOutdated != nil { + close(a.timeoutTaskOutdated) + a.timeoutTaskOutdated = nil + } + a.receivedScores = nil +} + +func (a *WaitMicroSnapshotState) checkAndAppendMicroBlock( + micro *proto.MicroBlock, + snapshot *proto.BlockSnapshot, +) (*proto.Block, error) { + top := a.baseInfo.storage.TopBlock() // Get the last block + if top.BlockID() != micro.Reference { // Microblock doesn't refer to last block + err := errors.Errorf("microblock TBID '%s' refer to block ID '%s' but last block ID is '%s'", + micro.TotalBlockID.String(), micro.Reference.String(), top.BlockID().String()) + metrics.FSMMicroBlockDeclined("ng", micro, err) + return &proto.Block{}, proto.NewInfoMsg(err) + } + ok, err := micro.VerifySignature(a.baseInfo.scheme) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.Errorf("microblock '%s' has invalid signature", micro.TotalBlockID.String()) + } + newTrs := top.Transactions.Join(micro.Transactions) + newBlock, err := proto.CreateBlock(newTrs, top.Timestamp, top.Parent, top.GeneratorPublicKey, top.NxtConsensus, + top.Version, top.Features, top.RewardVote, a.baseInfo.scheme) + if err != nil { + return nil, err + } + // TODO: check if light node feature activated + 1000 blocks + newBlock.StateHash = micro.StateHash + 
newBlock.BlockSignature = micro.TotalResBlockSigField + ok, err = newBlock.VerifySignature(a.baseInfo.scheme) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("incorrect signature for applied microblock") + } + err = newBlock.GenerateBlockID(a.baseInfo.scheme) + if err != nil { + return nil, errors.Wrap(err, "NGState microBlockByID: failed generate block id") + } + snapshotsToApply := snapshot + + h, errBToH := a.baseInfo.storage.BlockIDToHeight(top.BlockID()) + if errBToH != nil { + return nil, errBToH + } + topBlockSnapshots, errSAtH := a.baseInfo.storage.SnapshotsAtHeight(h) + if errSAtH != nil { + return nil, errSAtH + } + + topBlockSnapshots.AppendTxSnapshots(snapshot.TxSnapshots) + + snapshotsToApply = &topBlockSnapshots + err = a.baseInfo.storage.Map(func(state state.State) error { + _, er := a.baseInfo.blocksApplier.ApplyMicroWithSnapshots(state, newBlock, snapshotsToApply) + return er + }) + + if err != nil { + metrics.FSMMicroBlockDeclined("ng", micro, err) + return nil, errors.Wrap(err, "failed to apply created from micro block") + } + metrics.FSMMicroBlockApplied("ng", micro) + return newBlock, nil +} + +func initWaitMicroSnapshotStateInFSM(state *StateData, fsm *stateless.StateMachine, info BaseInfo) { + waitSnapshotSkipMessageList := proto.PeerMessageIDs{ + proto.ContentIDGetPeers, + proto.ContentIDPeers, + proto.ContentIDGetSignatures, + proto.ContentIDSignatures, + proto.ContentIDGetBlock, + proto.ContentIDBlock, + proto.ContentIDTransaction, + proto.ContentIDInvMicroblock, + proto.ContentIDCheckpoint, + proto.ContentIDMicroblockRequest, + proto.ContentIDMicroblock, + proto.ContentIDPBBlock, + proto.ContentIDPBMicroBlock, + proto.ContentIDPBTransaction, + proto.ContentIDGetBlockIDs, + proto.ContentIDBlockSnapshot, + } + fsm.Configure(WaitMicroSnapshotStateName). 
//nolint:dupl // it's state setup + OnEntry(func(_ context.Context, _ ...interface{}) error { + info.skipMessageList.SetList(waitSnapshotSkipMessageList) + return nil + }). + Ignore(BlockEvent). + Ignore(MinedBlockEvent). + Ignore(BlockIDsEvent). + Ignore(MicroBlockEvent). + Ignore(MicroBlockInvEvent). + Ignore(TransactionEvent). + Ignore(StopSyncEvent). + Ignore(StartMiningEvent). + Ignore(ChangeSyncPeerEvent). + Ignore(StopMiningEvent). + Ignore(HaltEvent). + Ignore(BlockSnapshotEvent). + PermitDynamic(TaskEvent, + createPermitDynamicCallback(TaskEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitMicroSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitMicroSnapshotState'", state.State)) + } + return a.Task(args[0].(tasks.AsyncTask)) + })). + PermitDynamic(MicroBlockSnapshotEvent, + createPermitDynamicCallback(MicroBlockSnapshotEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitMicroSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitMicroSnapshotState'", state.State)) + } + return a.MicroBlockSnapshot( + convertToInterface[peer.Peer](args[0]), + args[1].(proto.BlockID), + args[2].(proto.BlockSnapshot), + ) + })). 
+ PermitDynamic(ScoreEvent, + createPermitDynamicCallback(ScoreEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitMicroSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitMicroSnapshotState'", state.State)) + } + return a.Score(convertToInterface[peer.Peer](args[0]), args[1].(*proto.Score)) + })) +} diff --git a/pkg/node/fsm/wait_snapshot_state.go b/pkg/node/fsm/wait_snapshot_state.go new file mode 100644 index 000000000..66b9568df --- /dev/null +++ b/pkg/node/fsm/wait_snapshot_state.go @@ -0,0 +1,209 @@ +package fsm + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/qmuntal/stateless" + "go.uber.org/zap" + + "github.com/wavesplatform/gowaves/pkg/logging" + "github.com/wavesplatform/gowaves/pkg/metrics" + "github.com/wavesplatform/gowaves/pkg/node/fsm/tasks" + "github.com/wavesplatform/gowaves/pkg/p2p/peer" + "github.com/wavesplatform/gowaves/pkg/proto" +) + +const ( + snapshotTimeout = 30 * time.Second +) + +type WaitSnapshotState struct { + baseInfo BaseInfo + blocksCache blockStatesCache + timeoutTaskOutdated chan<- struct{} + blockWaitingForSnapshot *proto.Block + + receivedScores []ReceivedScore +} + +type ReceivedScore struct { + Peer peer.Peer + Score *proto.Score +} + +func newWaitSnapshotState(baseInfo BaseInfo, block *proto.Block, cache blockStatesCache) (State, tasks.Task) { + baseInfo.syncPeer.Clear() + timeoutTaskOutdated := make(chan struct{}) + st := &WaitSnapshotState{ + baseInfo: baseInfo, + blocksCache: cache, + timeoutTaskOutdated: timeoutTaskOutdated, + blockWaitingForSnapshot: block, + receivedScores: nil, + } + task := tasks.NewBlockSnapshotTimeoutTask(snapshotTimeout, block.BlockID(), timeoutTaskOutdated) + return st, task +} + +func (a *WaitSnapshotState) Errorf(err error) error { + return fsmErrorf(a, err) +} + +func (a *WaitSnapshotState) String() string { + return WaitSnapshotStateName +} + +func (a *WaitSnapshotState) 
Task(task tasks.AsyncTask) (State, Async, error) { + switch task.TaskType { + case tasks.Ping: + return a, nil, nil + case tasks.AskPeers: + zap.S().Named(logging.FSMNamespace).Debugf("[%s] Requesting peers", a) + a.baseInfo.peers.AskPeers() + return a, nil, nil + case tasks.MineMicro: + return a, nil, nil + case tasks.SnapshotTimeout: + t, ok := task.Data.(tasks.SnapshotTimeoutTaskData) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type %T, expected 'tasks.SnapshotTimeoutTaskData'", task.Data)) + } + switch t.SnapshotTaskType { + case tasks.BlockSnapshot: + defer a.cleanupBeforeTransition() + zap.S().Errorf("%v", a.Errorf(errors.Errorf( + "Failed to get snapshot for block '%s' - timeout", t.BlockID))) + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) + case tasks.MicroBlockSnapshot: + return a, nil, nil + default: + return a, nil, a.Errorf(errors.New("undefined Snapshot Task type")) + } + default: + return a, nil, a.Errorf(errors.Errorf( + "unexpected internal task '%d' with data '%+v' received by %s State", + task.TaskType, task.Data, a.String())) + } +} + +func (a *WaitSnapshotState) Score(p peer.Peer, score *proto.Score) (State, Async, error) { + metrics.FSMScore("ng", score, p.Handshake().NodeName) + if len(a.receivedScores) < scoresSliceMaxSize { + a.receivedScores = append(a.receivedScores, ReceivedScore{Peer: p, Score: score}) + } + return a, nil, nil +} + +func (a *WaitSnapshotState) BlockSnapshot( + _ peer.Peer, + blockID proto.BlockID, + snapshot proto.BlockSnapshot, +) (State, Async, error) { + if a.blockWaitingForSnapshot.BlockID() != blockID { + return a, nil, a.Errorf( + errors.Errorf("new snapshot doesn't match with block %s", a.blockWaitingForSnapshot.BlockID())) + } + + defer a.cleanupBeforeTransition() + _, err := a.baseInfo.blocksApplier.ApplyWithSnapshots( + a.baseInfo.storage, + []*proto.Block{a.blockWaitingForSnapshot}, + []*proto.BlockSnapshot{&snapshot}, + ) + if err != nil { 
+ zap.S().Errorf("%v", a.Errorf(errors.Wrapf(err, "Failed to apply block %s", a.blockWaitingForSnapshot.BlockID()))) + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) + } + + metrics.FSMKeyBlockApplied("ng", a.blockWaitingForSnapshot) + zap.S().Named(logging.FSMNamespace).Debugf("[%s] Handle received key block message: block '%s' applied to state", + a, blockID) + + a.blocksCache.Clear() + a.blocksCache.AddBlockState(a.blockWaitingForSnapshot) + a.blocksCache.AddSnapshot(blockID, snapshot) + a.baseInfo.scheduler.Reschedule() + a.baseInfo.actions.SendScore(a.baseInfo.storage) + a.baseInfo.CleanUtx() + return processScoreAfterApplyingOrReturnToNG(a, a.baseInfo, a.receivedScores, a.blocksCache) +} + +func (a *WaitSnapshotState) cleanupBeforeTransition() { + a.blockWaitingForSnapshot = nil + if a.timeoutTaskOutdated != nil { + close(a.timeoutTaskOutdated) + a.timeoutTaskOutdated = nil + } + a.receivedScores = nil +} + +func initWaitSnapshotStateInFSM(state *StateData, fsm *stateless.StateMachine, info BaseInfo) { + waitSnapshotSkipMessageList := proto.PeerMessageIDs{ + proto.ContentIDGetPeers, + proto.ContentIDPeers, + proto.ContentIDGetSignatures, + proto.ContentIDSignatures, + proto.ContentIDGetBlock, + proto.ContentIDBlock, + proto.ContentIDTransaction, + proto.ContentIDInvMicroblock, + proto.ContentIDCheckpoint, + proto.ContentIDMicroblockRequest, + proto.ContentIDMicroblock, + proto.ContentIDPBBlock, + proto.ContentIDPBMicroBlock, + proto.ContentIDPBTransaction, + proto.ContentIDGetBlockIDs, + } + fsm.Configure(WaitSnapshotStateName). //nolint:dupl // it's state setup + OnEntry(func(_ context.Context, _ ...interface{}) error { + info.skipMessageList.SetList(waitSnapshotSkipMessageList) + return nil + }). + Ignore(BlockEvent). + Ignore(MinedBlockEvent). + Ignore(BlockIDsEvent). + Ignore(MicroBlockEvent). + Ignore(MicroBlockInvEvent). + Ignore(TransactionEvent). + Ignore(StopSyncEvent). + Ignore(StartMiningEvent). 
+ Ignore(ChangeSyncPeerEvent). + Ignore(StopMiningEvent). + Ignore(HaltEvent). + Ignore(MicroBlockSnapshotEvent). + PermitDynamic(TaskEvent, + createPermitDynamicCallback(TaskEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitSnapshotState'", state.State)) + } + return a.Task(args[0].(tasks.AsyncTask)) + })). + PermitDynamic(BlockSnapshotEvent, + createPermitDynamicCallback(BlockSnapshotEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitSnapshotState'", state.State)) + } + return a.BlockSnapshot( + convertToInterface[peer.Peer](args[0]), + args[1].(proto.BlockID), + args[2].(proto.BlockSnapshot), + ) + })). + PermitDynamic(ScoreEvent, + createPermitDynamicCallback(ScoreEvent, state, func(args ...interface{}) (State, Async, error) { + a, ok := state.State.(*WaitSnapshotState) + if !ok { + return a, nil, a.Errorf(errors.Errorf( + "unexpected type '%T' expected '*WaitSnapshotState'", state.State)) + } + return a.Score(convertToInterface[peer.Peer](args[0]), args[1].(*proto.Score)) + })) +} diff --git a/pkg/node/node.go b/pkg/node/node.go index b3118d374..e50c6aa63 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -47,10 +47,12 @@ type Node struct { services services.Services microblockInterval time.Duration obsolescence time.Duration + enableLightMode bool } func NewNode( services services.Services, declAddr proto.TCPAddr, bindAddr proto.TCPAddr, microblockInterval time.Duration, + enableLightMode bool, ) *Node { if bindAddr.Empty() { bindAddr = declAddr @@ -64,6 +66,7 @@ func NewNode( utx: services.UtxPool, services: services, microblockInterval: microblockInterval, + enableLightMode: enableLightMode, } } @@ -148,7 +151,7 @@ func (a *Node) Run( tasksCh := make(chan 
tasks.AsyncTask, 10) // TODO: Consider using context `ctx` in FSM, for now FSM works in the background context. - m, async, err := fsm.NewFSM(a.services, a.microblockInterval, a.obsolescence, syncPeer) + m, async, err := fsm.NewFSM(a.services, a.microblockInterval, a.obsolescence, syncPeer, a.enableLightMode) if err != nil { zap.S().Errorf("Failed to create FSM: %v", err) return diff --git a/pkg/node/peers/peer_manager.go b/pkg/node/peers/peer_manager.go index 96752caad..b0942c6cf 100644 --- a/pkg/node/peers/peer_manager.go +++ b/pkg/node/peers/peer_manager.go @@ -430,7 +430,7 @@ func (a *PeerManagerImpl) AddAddress(ctx context.Context, addr proto.TCPAddr) er if removeErr := a.peerStorage.DeleteKnown([]storage.KnownPeer{known}); removeErr != nil { zap.S().Errorf("Failed to remove peer %q from known peers storage", known.String()) } - zap.S().Named(logging.NetworkNamespace).Debug("Error: %v", err) + zap.S().Named(logging.NetworkNamespace).Debugf("Error: %v", err) } }() return nil diff --git a/pkg/p2p/conn/conn.go b/pkg/p2p/conn/conn.go index 566d31565..b477bbe55 100644 --- a/pkg/p2p/conn/conn.go +++ b/pkg/p2p/conn/conn.go @@ -17,7 +17,9 @@ import ( ) const ( - maxMessageSize = 2 << (10 * 2) + KiB = 1024 + MiB = KiB * KiB + maxMessageSize = 100 * MiB maxConnIODurationPerMessage = 15 * time.Second MaxConnIdleIODuration = 5 * time.Minute ) diff --git a/pkg/p2p/peer/extension/extension.go b/pkg/p2p/peer/extension/extension.go index 5eec3ec7b..940a9349f 100644 --- a/pkg/p2p/peer/extension/extension.go +++ b/pkg/p2p/peer/extension/extension.go @@ -14,6 +14,8 @@ var peerVersionWithProtobuf = proto.NewVersion(1, 2, 0) type PeerExtension interface { AskBlocksIDs(id []proto.BlockID) AskBlock(id proto.BlockID) + AskBlockSnapshot(id proto.BlockID) + AskMicroBlockSnapshot(id proto.BlockID) SendMicroBlock(micro *proto.MicroBlock) error SendTransaction(t proto.Transaction) error } @@ -65,6 +67,20 @@ func (a PeerWrapperImpl) AskBlock(id proto.BlockID) { 
a.p.SendMessage(&proto.GetBlockMessage{BlockID: id}) } +func (a PeerWrapperImpl) AskBlockSnapshot(id proto.BlockID) { + zap.S().Named(logging.NetworkNamespace).Debugf( + "[%s] Requesting block snapshot for block %s", a.p.ID().String(), id.ShortString(), + ) + a.p.SendMessage(&proto.GetBlockSnapshotMessage{BlockID: id}) +} + +func (a PeerWrapperImpl) AskMicroBlockSnapshot(id proto.BlockID) { + zap.S().Named(logging.NetworkNamespace).Debugf( + "[%s] Requesting micro block snapshot for micro block %s", a.p.ID().String(), id.ShortString(), + ) + a.p.SendMessage(&proto.MicroBlockSnapshotRequestMessage{BlockIDBytes: id.Bytes()}) +} + func (a PeerWrapperImpl) SendMicroBlock(micro *proto.MicroBlock) error { if a.p.Handshake().Version.Cmp(peerVersionWithProtobuf) < 0 { bts, err := micro.MarshalBinary(a.scheme) diff --git a/pkg/p2p/peer/handle_test.go b/pkg/p2p/peer/handle_test.go index e2e16c97d..4c38b0571 100644 --- a/pkg/p2p/peer/handle_test.go +++ b/pkg/p2p/peer/handle_test.go @@ -29,7 +29,7 @@ func TestHandleStopContext(t *testing.T) { <-time.After(1 * time.Millisecond) cancel() }() - parent := NewParent() + parent := NewParent(false) remote := NewRemote() peer := &mockPeer{CloseFunc: func() error { return nil }} err := Handle(ctx, peer, parent, remote) @@ -44,7 +44,7 @@ func TestHandleStopContext(t *testing.T) { func TestHandleReceive(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) remote := NewRemote() - parent := NewParent() + parent := NewParent(false) var wg sync.WaitGroup wg.Add(1) go func() { @@ -69,7 +69,7 @@ func TestHandleReceive(t *testing.T) { func TestHandleError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) remote := NewRemote() - parent := NewParent() + parent := NewParent(false) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/pkg/p2p/peer/peer.go b/pkg/p2p/peer/peer.go index d603ebb60..4499af199 100644 --- a/pkg/p2p/peer/peer.go +++ b/pkg/p2p/peer/peer.go @@ -8,6 +8,11 @@ import ( 
"github.com/wavesplatform/gowaves/pkg/proto" ) +const ( + defaultChSize = 100 + chSizeInLightMode = defaultChSize * 2 +) + type Remote struct { ToCh chan []byte FromCh chan *bytebufferpool.ByteBuffer @@ -16,8 +21,8 @@ type Remote struct { func NewRemote() Remote { return Remote{ - ToCh: make(chan []byte, 100), - FromCh: make(chan *bytebufferpool.ByteBuffer, 100), + ToCh: make(chan []byte, chSizeInLightMode), + FromCh: make(chan *bytebufferpool.ByteBuffer, chSizeInLightMode), ErrCh: make(chan error, 10), } } @@ -28,10 +33,15 @@ type Parent struct { SkipMessageList *messages.SkipMessageList } -func NewParent() Parent { +func NewParent(enableLightNode bool) Parent { + messageChSize := defaultChSize + if enableLightNode { + // because in light node we send block and snapshot request messages + messageChSize = chSizeInLightMode + } return Parent{ - MessageCh: make(chan ProtoMessage, 100), - InfoCh: make(chan InfoMessage, 100), + MessageCh: make(chan ProtoMessage, messageChSize), + InfoCh: make(chan InfoMessage, messageChSize), SkipMessageList: &messages.SkipMessageList{}, } } diff --git a/pkg/proto/block_snapshot.go b/pkg/proto/block_snapshot.go index 174d0fd44..0f6edcf72 100644 --- a/pkg/proto/block_snapshot.go +++ b/pkg/proto/block_snapshot.go @@ -17,6 +17,10 @@ func (bs *BlockSnapshot) AppendTxSnapshot(txSnapshot []AtomicSnapshot) { bs.TxSnapshots = append(bs.TxSnapshots, txSnapshot) } +func (bs *BlockSnapshot) AppendTxSnapshots(txSnapshots [][]AtomicSnapshot) { + bs.TxSnapshots = append(bs.TxSnapshots, txSnapshots...) 
+} + func (bs BlockSnapshot) MarshallBinary() ([]byte, error) { result := binary.BigEndian.AppendUint32([]byte{}, uint32(len(bs.TxSnapshots))) for _, ts := range bs.TxSnapshots { @@ -68,6 +72,21 @@ func (bs *BlockSnapshot) UnmarshalBinary(data []byte, scheme Scheme) error { return nil } +func (bs BlockSnapshot) ToProtobuf() ([]*g.TransactionStateSnapshot, error) { + data := make([]g.TransactionStateSnapshot, len(bs.TxSnapshots)) + res := make([]*g.TransactionStateSnapshot, len(bs.TxSnapshots)) + for i, ts := range bs.TxSnapshots { + tsProto := &data[i] + for _, atomicSnapshot := range ts { + if err := atomicSnapshot.AppendToProtobuf(tsProto); err != nil { + return nil, errors.Wrap(err, "failed to marshall TransactionSnapshot to proto") + } + } + res[i] = tsProto + } + return res, nil +} + func (bs BlockSnapshot) MarshalJSON() ([]byte, error) { if len(bs.TxSnapshots) == 0 { return []byte("[]"), nil @@ -311,3 +330,39 @@ func (s *txSnapshotJSON) ApplyTransactionsStatus(snapshot TransactionStatusSnaps s.ApplicationStatus = snapshot.Status return nil } + +func (bs *BlockSnapshot) UnmarshalBinaryImport(data []byte, scheme Scheme) error { + if len(data) < uint32Size { + return errors.Errorf("BlockSnapshot UnmarshallBinary: invalid data size") + } + snapshotsBytesSize := binary.BigEndian.Uint32(data[0:uint32Size]) + data = data[uint32Size:] // skip size + if snapshotsBytesSize != uint32(len(data)) { + return errors.Errorf("invalid snapshots data size") + } + var txSnapshots [][]AtomicSnapshot + for i := uint32(0); snapshotsBytesSize > 0; i++ { + if len(data) < uint32Size { + return errors.Errorf("BlockSnapshot UnmarshallBinary: invalid data size") + } + oneSnapshotSize := binary.BigEndian.Uint32(data[0:uint32Size]) + var tsProto g.TransactionStateSnapshot + data = data[uint32Size:] // skip size + if uint32(len(data)) < oneSnapshotSize { + return errors.Errorf("BlockSnapshot UnmarshallBinary: invalid snapshot size") + } + err := tsProto.UnmarshalVT(data[0:oneSnapshotSize]) 
+ if err != nil { + return err + } + atomicTS, err := TxSnapshotsFromProtobuf(scheme, &tsProto) + if err != nil { + return err + } + txSnapshots = append(txSnapshots, atomicTS) + data = data[oneSnapshotSize:] + snapshotsBytesSize -= oneSnapshotSize + uint32Size + } + bs.TxSnapshots = txSnapshots + return nil +} diff --git a/pkg/proto/proto.go b/pkg/proto/proto.go index 2c9faf878..8a917301f 100644 --- a/pkg/proto/proto.go +++ b/pkg/proto/proto.go @@ -12,6 +12,7 @@ import ( "github.com/pkg/errors" "github.com/valyala/bytebufferpool" + "github.com/wavesplatform/gowaves/pkg/crypto" "github.com/wavesplatform/gowaves/pkg/util/collect_writes" ) @@ -33,26 +34,33 @@ type ( // Constants for message IDs const ( - ContentIDGetPeers PeerMessageID = 0x1 - ContentIDPeers PeerMessageID = 0x2 - ContentIDGetSignatures PeerMessageID = 0x14 - ContentIDSignatures PeerMessageID = 0x15 - ContentIDGetBlock PeerMessageID = 0x16 - ContentIDBlock PeerMessageID = 0x17 - ContentIDScore PeerMessageID = 0x18 - ContentIDTransaction PeerMessageID = 0x19 - ContentIDInvMicroblock PeerMessageID = 0x1A - ContentIDCheckpoint PeerMessageID = 0x64 - ContentIDMicroblockRequest PeerMessageID = 27 - ContentIDMicroblock PeerMessageID = 28 - ContentIDPBBlock PeerMessageID = 29 - ContentIDPBMicroBlock PeerMessageID = 30 - ContentIDPBTransaction PeerMessageID = 31 - ContentIDGetBlockIds PeerMessageID = 32 - ContentIDBlockIds PeerMessageID = 33 + ContentIDGetPeers PeerMessageID = 0x1 + ContentIDPeers PeerMessageID = 0x2 + ContentIDGetSignatures PeerMessageID = 0x14 + ContentIDSignatures PeerMessageID = 0x15 + ContentIDGetBlock PeerMessageID = 0x16 + ContentIDBlock PeerMessageID = 0x17 + ContentIDScore PeerMessageID = 0x18 + ContentIDTransaction PeerMessageID = 0x19 + ContentIDInvMicroblock PeerMessageID = 0x1A + ContentIDCheckpoint PeerMessageID = 0x64 + ContentIDMicroblockRequest PeerMessageID = 27 + ContentIDMicroblock PeerMessageID = 28 + ContentIDPBBlock PeerMessageID = 29 + ContentIDPBMicroBlock 
PeerMessageID = 30 + ContentIDPBTransaction PeerMessageID = 31 + ContentIDGetBlockIDs PeerMessageID = 32 + ContentIDBlockIDs PeerMessageID = 33 + ContentIDGetBlockSnapshot PeerMessageID = 34 + ContentIDMicroBlockSnapshotRequest PeerMessageID = 35 + ContentIDBlockSnapshot PeerMessageID = 36 + ContentIDMicroBlockSnapshot PeerMessageID = 37 ) -var ProtocolVersion = NewVersion(1, 4, 0) +func ProtocolVersion() Version { + const major, minor, patch = 1, 5, 0 + return NewVersion(major, minor, patch) +} type Message interface { io.ReaderFrom @@ -1805,10 +1813,18 @@ func UnmarshalMessage(b []byte) (Message, error) { m = &PBMicroBlockMessage{} case ContentIDPBTransaction: m = &PBTransactionMessage{} - case ContentIDGetBlockIds: + case ContentIDGetBlockIDs: m = &GetBlockIdsMessage{} - case ContentIDBlockIds: + case ContentIDBlockIDs: m = &BlockIdsMessage{} + case ContentIDGetBlockSnapshot: + m = &GetBlockSnapshotMessage{} + case ContentIDMicroBlockSnapshotRequest: + m = &MicroBlockSnapshotRequestMessage{} + case ContentIDBlockSnapshot: + m = &BlockSnapshotMessage{} + case ContentIDMicroBlockSnapshot: + m = &MicroBlockSnapshotMessage{} default: return nil, errors.Errorf( "received unknown content id byte %d 0x%x", b[HeaderContentIDPosition], b[HeaderContentIDPosition]) @@ -1837,7 +1853,7 @@ func (m *GetBlockIdsMessage) MarshalBinary() ([]byte, error) { var h Header h.Length = maxHeaderLength + uint32(len(body)) - 4 h.Magic = headerMagic - h.ContentID = ContentIDGetBlockIds + h.ContentID = ContentIDGetBlockIDs h.PayloadLength = uint32(len(body)) dig, err := crypto.FastHash(body) if err != nil { @@ -1866,7 +1882,7 @@ func (m *GetBlockIdsMessage) UnmarshalBinary(data []byte) error { if h.Magic != headerMagic { return fmt.Errorf("wrong magic in Header: %x", h.Magic) } - if h.ContentID != ContentIDGetBlockIds { + if h.ContentID != ContentIDGetBlockIDs { return fmt.Errorf("wrong ContentID in Header: %x", h.ContentID) } data = data[17:] @@ -1933,7 +1949,7 @@ func (m *BlockIdsMessage) 
MarshalBinary() ([]byte, error) { var h Header h.Length = maxHeaderLength + uint32(len(body)) - 4 h.Magic = headerMagic - h.ContentID = ContentIDBlockIds + h.ContentID = ContentIDBlockIDs h.PayloadLength = uint32(len(body)) dig, err := crypto.FastHash(body) if err != nil { @@ -1963,7 +1979,7 @@ func (m *BlockIdsMessage) UnmarshalBinary(data []byte) error { if h.Magic != headerMagic { return fmt.Errorf("wrong magic in Header: %x", h.Magic) } - if h.ContentID != ContentIDBlockIds { + if h.ContentID != ContentIDBlockIDs { return fmt.Errorf("wrong ContentID in Header: %x", h.ContentID) } data = data[17:] @@ -2044,3 +2060,233 @@ type MiningLimits struct { ClassicAmountOfTxsInBlock int MaxTxsSizeInBytes int } + +func buildHeader(body []byte, messID PeerMessageID) (Header, error) { + var h Header + h.Length = maxHeaderLength + uint32(len(body)) - headerChecksumLen + h.Magic = headerMagic + h.ContentID = messID + h.PayloadLength = uint32(len(body)) + dig, err := crypto.FastHash(body) + if err != nil { + return Header{}, err + } + copy(h.PayloadChecksum[:], dig[:headerChecksumLen]) + return h, nil +} + +type GetBlockSnapshotMessage struct { + BlockID BlockID +} + +func (m *GetBlockSnapshotMessage) ReadFrom(r io.Reader) (int64, error) { + packet, nn, err := readPacket(r) + if err != nil { + return nn, err + } + + return nn, m.UnmarshalBinary(packet) +} + +func (m *GetBlockSnapshotMessage) WriteTo(w io.Writer) (int64, error) { + buf, err := m.MarshalBinary() + if err != nil { + return 0, err + } + nn, err := w.Write(buf) + n := int64(nn) + return n, err +} + +func (m *GetBlockSnapshotMessage) UnmarshalBinary(data []byte) error { + return parsePacket(data, ContentIDGetBlockSnapshot, "GetBlockSnapshotMessage", func(payload []byte) error { + blockID, err := NewBlockIDFromBytes(payload) + if err != nil { + return err + } + m.BlockID = blockID + return nil + }) +} + +func (m *GetBlockSnapshotMessage) MarshalBinary() ([]byte, error) { + body := m.BlockID.Bytes() + h, err := 
buildHeader(body, ContentIDGetBlockSnapshot) + if err != nil { + return nil, err + } + hdr, err := h.MarshalBinary() + if err != nil { + return nil, err + } + body = append(hdr, body...) + return body, nil +} + +type BlockSnapshotMessage struct { + Bytes []byte +} + +func (m *BlockSnapshotMessage) ReadFrom(r io.Reader) (int64, error) { + packet, nn, err := readPacket(r) + if err != nil { + return nn, err + } + + return nn, m.UnmarshalBinary(packet) +} + +func (m *BlockSnapshotMessage) WriteTo(w io.Writer) (int64, error) { + buf, err := m.MarshalBinary() + if err != nil { + return 0, err + } + nn, err := w.Write(buf) + n := int64(nn) + return n, err +} + +func (m *BlockSnapshotMessage) UnmarshalBinary(data []byte) error { + if len(data) < maxHeaderLength { + return errors.New("BlockSnapshotMessage UnmarshalBinary: invalid data size") + } + var h Header + + if err := h.UnmarshalBinary(data); err != nil { + return err + } + if h.Magic != headerMagic { + return fmt.Errorf("wrong magic in Header: %x", h.Magic) + } + if h.ContentID != ContentIDBlockSnapshot { + return fmt.Errorf("wrong ContentID in Header: %x", h.ContentID) + } + m.Bytes = make([]byte, h.PayloadLength) + copy(m.Bytes, data[maxHeaderLength:maxHeaderLength+h.PayloadLength]) + return nil +} + +func (m *BlockSnapshotMessage) MarshalBinary() ([]byte, error) { + body := m.Bytes + + h, err := buildHeader(body, ContentIDBlockSnapshot) + if err != nil { + return nil, err + } + + hdr, err := h.MarshalBinary() + if err != nil { + return nil, err + } + body = append(hdr, body...) 
+ return body, nil +} + +type MicroBlockSnapshotMessage struct { + Bytes []byte +} + +func (m *MicroBlockSnapshotMessage) ReadFrom(r io.Reader) (int64, error) { + packet, nn, err := readPacket(r) + if err != nil { + return nn, err + } + + return nn, m.UnmarshalBinary(packet) +} + +func (m *MicroBlockSnapshotMessage) WriteTo(w io.Writer) (int64, error) { + buf, err := m.MarshalBinary() + if err != nil { + return 0, err + } + nn, err := w.Write(buf) + n := int64(nn) + return n, err +} + +func (m *MicroBlockSnapshotMessage) UnmarshalBinary(data []byte) error { + if len(data) < maxHeaderLength { + return errors.New("MicroBlockSnapshotMessage UnmarshalBinary: invalid data size") + } + var h Header + + if err := h.UnmarshalBinary(data); err != nil { + return err + } + if h.Magic != headerMagic { + return fmt.Errorf("wrong magic in Header: %x", h.Magic) + } + if h.ContentID != ContentIDMicroBlockSnapshot { + return fmt.Errorf("wrong ContentID in Header: %x", h.ContentID) + } + m.Bytes = make([]byte, h.PayloadLength) + copy(m.Bytes, data[maxHeaderLength:maxHeaderLength+h.PayloadLength]) + return nil +} + +func (m *MicroBlockSnapshotMessage) MarshalBinary() ([]byte, error) { + body := m.Bytes + + h, err := buildHeader(body, ContentIDMicroBlockSnapshot) + if err != nil { + return nil, err + } + + hdr, err := h.MarshalBinary() + if err != nil { + return nil, err + } + body = append(hdr, body...) 
+ return body, nil +} + +type MicroBlockSnapshotRequestMessage struct { + BlockIDBytes []byte +} + +func (m *MicroBlockSnapshotRequestMessage) ReadFrom(r io.Reader) (int64, error) { + packet, nn, err := readPacket(r) + if err != nil { + return nn, err + } + + return nn, m.UnmarshalBinary(packet) +} + +func (m *MicroBlockSnapshotRequestMessage) WriteTo(w io.Writer) (int64, error) { + buf, err := m.MarshalBinary() + if err != nil { + return 0, err + } + nn, err := w.Write(buf) + n := int64(nn) + return n, err +} + +func (m *MicroBlockSnapshotRequestMessage) UnmarshalBinary(data []byte) error { + return parsePacket( + data, + ContentIDMicroBlockSnapshotRequest, + "MicroBlockSnapshotRequestMessage", + func(payload []byte) error { + m.BlockIDBytes = payload + return nil + }) +} + +func (m *MicroBlockSnapshotRequestMessage) MarshalBinary() ([]byte, error) { + body := m.BlockIDBytes + + h, err := buildHeader(body, ContentIDMicroBlockSnapshotRequest) + if err != nil { + return nil, err + } + + hdr, err := h.MarshalBinary() + if err != nil { + return nil, err + } + body = append(hdr, body...) 
+ return body, nil +} diff --git a/pkg/proto/protobuf_converters.go b/pkg/proto/protobuf_converters.go index 214ca2095..d00f7ae8a 100644 --- a/pkg/proto/protobuf_converters.go +++ b/pkg/proto/protobuf_converters.go @@ -303,13 +303,14 @@ func appendSponsorshipFromProto( return res, nil } -// TxSnapshotsFromProtobufWithoutTxStatus Unmarshalling order (how in proto schemas): -// WavesBalances and AssetBalances -// LeaseBalances +// TxSnapshotsFromProtobufWithoutTxStatus Unmarshalling order +// (don't change it if it is not necessary, order is important): // NewAsset // AssetVolume // AssetDescription // AssetScript +// WavesBalances and AssetBalances +// LeaseBalances // Alias // FilledVolumes // NewLeases @@ -325,27 +326,27 @@ func TxSnapshotsFromProtobufWithoutTxStatus( txSnapshots []AtomicSnapshot err error ) - txSnapshots, err = appendBalancesFromProto(txSnapshots, scheme, txSnapshotProto.Balances) + txSnapshots, err = appendNewAssetFromProto(txSnapshots, txSnapshotProto.AssetStatics) if err != nil { return nil, err } - txSnapshots, err = appendLeaseBalancesFromProto(txSnapshots, scheme, txSnapshotProto.LeaseBalances) + txSnapshots, err = appendAssetVolumeFromProto(txSnapshots, txSnapshotProto.AssetVolumes) if err != nil { return nil, err } - txSnapshots, err = appendNewAssetFromProto(txSnapshots, txSnapshotProto.AssetStatics) + txSnapshots, err = appendAssetDescriptionFromProto(txSnapshots, txSnapshotProto.AssetNamesAndDescriptions) if err != nil { return nil, err } - txSnapshots, err = appendAssetVolumeFromProto(txSnapshots, txSnapshotProto.AssetVolumes) + txSnapshots, err = appendAssetScriptFromProto(txSnapshots, txSnapshotProto.AssetScripts) if err != nil { return nil, err } - txSnapshots, err = appendAssetDescriptionFromProto(txSnapshots, txSnapshotProto.AssetNamesAndDescriptions) + txSnapshots, err = appendBalancesFromProto(txSnapshots, scheme, txSnapshotProto.Balances) if err != nil { return nil, err } - txSnapshots, err = 
appendAssetScriptFromProto(txSnapshots, txSnapshotProto.AssetScripts) + txSnapshots, err = appendLeaseBalancesFromProto(txSnapshots, scheme, txSnapshotProto.LeaseBalances) if err != nil { return nil, err } @@ -396,6 +397,19 @@ func TxSnapshotsFromProtobuf(scheme Scheme, txSnapshotProto *g.TransactionStateS return txSnapshots, nil } +func BlockSnapshotFromProtobuf(scheme Scheme, blockSnapshot []*g.TransactionStateSnapshot) (BlockSnapshot, error) { + res := BlockSnapshot{TxSnapshots: make([][]AtomicSnapshot, 0, len(blockSnapshot))} + for _, ts := range blockSnapshot { + var txSnapshots []AtomicSnapshot + txSnapshots, err := TxSnapshotsFromProtobuf(scheme, ts) + if err != nil { + return BlockSnapshot{}, err + } + res.AppendTxSnapshot(txSnapshots) + } + return res, nil +} + type ProtobufConverter struct { FallbackChainID byte err error diff --git a/pkg/proto/snapshot_types.go b/pkg/proto/snapshot_types.go index 64844f127..238872d19 100644 --- a/pkg/proto/snapshot_types.go +++ b/pkg/proto/snapshot_types.go @@ -8,6 +8,7 @@ import ( "github.com/wavesplatform/gowaves/pkg/crypto" g "github.com/wavesplatform/gowaves/pkg/grpc/generated/waves" + "github.com/wavesplatform/gowaves/pkg/util/common" ) type AtomicSnapshot interface { @@ -16,6 +17,7 @@ type AtomicSnapshot interface { because balances diffs are applied later in the block. 
*/ AppendToProtobuf(txSnapshots *g.TransactionStateSnapshot) error } + type WavesBalanceSnapshot struct { Address WavesAddress `json:"address"` Balance uint64 `json:"balance"` @@ -279,15 +281,8 @@ func (s *LeaseBalanceSnapshot) FromProtobuf(scheme Scheme, p *g.TransactionState if err != nil { return err } - var c ProtobufConverter - in := c.uint64(p.In) - if c.err != nil { - return c.err - } - out := c.uint64(p.Out) - if c.err != nil { - return c.err - } + in := uint64(p.In) + out := uint64(p.Out) s.Address = addr s.LeaseIn = in s.LeaseOut = out @@ -573,7 +568,7 @@ func (s AssetVolumeSnapshot) ToProtobuf() (*g.TransactionStateSnapshot_AssetVolu return &g.TransactionStateSnapshot_AssetVolume{ AssetId: s.AssetID.Bytes(), Reissuable: s.IsReissuable, - Volume: s.TotalQuantity.Bytes(), + Volume: common.Encode2CBigInt(&s.TotalQuantity), }, nil } @@ -594,7 +589,7 @@ func (s *AssetVolumeSnapshot) FromProtobuf(p *g.TransactionStateSnapshot_AssetVo } s.AssetID = assetID - s.TotalQuantity.SetBytes(p.Volume) + s.TotalQuantity = *common.Decode2CBigInt(p.Volume) s.IsReissuable = p.Reissuable return nil } diff --git a/pkg/proto/snapshot_types_test.go b/pkg/proto/snapshot_types_test.go index cc240a6cf..d55f3fe62 100644 --- a/pkg/proto/snapshot_types_test.go +++ b/pkg/proto/snapshot_types_test.go @@ -81,6 +81,10 @@ func TestTxSnapshotMarshalToPBAndUnmarshalFromPB(t *testing.T) { testCaseName: "all_together", pbInBase64: 
"CkMKGgFUYP1Q7yDeRXEgffuciL58HC+KIscK2I+1EiUKIF5mn4IKZ9CIbYdHjPBDoqx4XMevVdwxzhB1OUvTUKJbEJBOCkQKGgFUQsXJY3P1D9gTUGBPHBTypsklatr9GbAqEiYKIHidwBEj1TYPcIKv1LRquL/otRYLv7UmwEPl/Hg6T4lOEKCcAQokChoBVGD9UO8g3kVxIH37nIi+fBwviiLHCtiPtRIGEICU69wDCiQKGgFUQsXJY3P1D9gTUGBPHBTypsklatr9GbAqEgYQgKjWuQcSIgoaAVRg/VDvIN5FcSB9+5yIvnwcL4oixwrYj7UYgJri4RASIgoaAVRCxcljc/UP2BNQYE8cFPKmySVq2v0ZsCoQgK7NvhQSIgoaAVQwI8uotbzVfYC2BqPYrAX1CRomrjsJ6/0YgKjWuQcSHAoaAVRhIl3y/Mj2ursZ0i4PLrkkxzzOLj3sT3waZgoguIIzLIWCBbxl3Ysa38C0yvtZan6R9ZvOU33eldmrOo0SIFDHWa9Cd6VU8M20LLFHzbBTveERf1sEOw19SUS40GBoGhoBVELFyWNz9Q/YE1BgTxwU8qbJJWra/RmwKiCA8ouoCSIiCiC4gjMshYIFvGXdixrfwLTK+1lqfpH1m85Tfd6V2as6jSpGCiBeZp+CCmfQiG2HR4zwQ6KseFzHr1XcMc4QdTlL01CiWxIg3GBhamPTKLR06Q6bJKMnDfzLetm2Xz8SAuH6VNGUwZ4gASpGCiB4ncARI9U2D3CCr9S0ari/6LUWC7+1JsBD5fx4Ok+JThIg3GBhamPTKLR06Q6bJKMnDfzLetm2Xz8SAuH6VNGUwZ4YCDIvCiB4ncARI9U2D3CCr9S0ari/6LUWC7+1JsBD5fx4Ok+JThABGgkE//////////YyJQogXmafggpn0Ihth0eM8EOirHhcx69V3DHOEHU5S9NQolsaAQEyKAogOG+NPdNOUn6/g2LbTm9xhzWb1ZaCdA8Wi+OYkjUfrbIaBDuaygA6QwogeJ3AESPVNg9wgq/UtGq4v+i1Fgu/tSbAQ+X8eDpPiU4SB25ld25hbWUaFnNvbWUgZmFuY3kgZGVzY3JpcHRpb25KJgoaAVRCxcljc/UP2BNQYE8cFPKmySVq2v0ZsCoSCHdhdmVzZXZvUisKIMkknO8yHpMUT/XKkkdlrbYCG0Dt+qvVgphfgtRbyRDMEICU69wDGNAPUisKIJZ9YwvJObbWItHAD2zhbaFOTFx2zQ4p0Xbo81GXHKeEEICU69wDGNAPWi4KIFDHWa9Cd6VU8M20LLFHzbBTveERf1sEOw19SUS40GBoEgcGAQaw0U/PGPoBYloKGgFUYP1Q7yDeRXEgffuciL58HC+KIscK2I+1EgUKA2ZvbxISCgNiYXJqC1N0cmluZ1ZhbHVlEiEKA2JhemIaAVRg/VDvIN5FcSB9+5yIvnwcL4oixwrYj7ViLwoaAVRCxcljc/UP2BNQYE8cFPKmySVq2v0ZsCoSCAoDZm9vULAJEgcKA2JhclgBaiUKIHidwBEj1TYPcIKv1LRquL/otRYLv7UmwEPl/Hg6T4lOEPwqcAE=", //nolint:lll }, + { + testCaseName: "asset_volume_two's_complement", + pbInBase64: "MigKIOfYm9p3M/NiYXCvwCU3ho5eVFpwE5iekWev4QXhZMvuEAEaAgDI", + }, } for _, testCase := range testCases { t.Run(testCase.testCaseName, func(t *testing.T) { diff --git a/pkg/services/services.go b/pkg/services/services.go index 50abc3b94..635c5db96 100644 --- a/pkg/services/services.go +++ b/pkg/services/services.go 
@@ -11,13 +11,31 @@ import ( type BlocksApplier interface { BlockExists(state state.State, block *proto.Block) (bool, error) - Apply(state state.State, block []*proto.Block) (proto.Height, error) - ApplyMicro(state state.State, block *proto.Block) (proto.Height, error) + Apply( + state state.State, + block []*proto.Block, + ) (proto.Height, error) + ApplyMicro( + state state.State, + block *proto.Block, + ) (proto.Height, error) + ApplyWithSnapshots( + state state.State, + block []*proto.Block, + snapshots []*proto.BlockSnapshot, + ) (proto.Height, error) + ApplyMicroWithSnapshots( + state state.State, + block *proto.Block, + snapshots *proto.BlockSnapshot, + ) (proto.Height, error) } type MicroBlockCache interface { - Add(blockID proto.BlockID, micro *proto.MicroBlock) - Get(proto.BlockID) (*proto.MicroBlock, bool) + AddMicroBlock(blockID proto.BlockID, micro *proto.MicroBlock) + AddMicroBlockWithSnapshot(blockID proto.BlockID, micro *proto.MicroBlock, snapshot *proto.BlockSnapshot) + GetBlock(sig proto.BlockID) (*proto.MicroBlock, bool) + GetSnapshot(sig proto.BlockID) (*proto.BlockSnapshot, bool) } type MicroBlockInvCache interface { diff --git a/pkg/state/address_transactions_test.go b/pkg/state/address_transactions_test.go index 7bc825945..cae6677ad 100644 --- a/pkg/state/address_transactions_test.go +++ b/pkg/state/address_transactions_test.go @@ -13,7 +13,7 @@ import ( func testIterImpl(t *testing.T, params StateParams) { dataDir := t.TempDir() - st, err := NewState(dataDir, true, params, settings.MainNetSettings) + st, err := NewState(dataDir, true, params, settings.MainNetSettings, false) require.NoError(t, err) t.Cleanup(func() { diff --git a/pkg/state/api.go b/pkg/state/api.go index 17a20a764..4c1ac38cc 100644 --- a/pkg/state/api.go +++ b/pkg/state/api.go @@ -152,8 +152,10 @@ type StateModifier interface { AddDeserializedBlock(block *proto.Block) (*proto.Block, error) // AddBlocks adds batch of new blocks to state. 
AddBlocks(blocks [][]byte) error + AddBlocksWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error // AddDeserializedBlocks marshals blocks to binary and calls AddBlocks. AddDeserializedBlocks(blocks []*proto.Block) (*proto.Block, error) + AddDeserializedBlocksWithSnapshots(blocks []*proto.Block, snapshots []*proto.BlockSnapshot) (*proto.Block, error) // Rollback functionality. RollbackToHeight(height proto.Height) error RollbackTo(removalEdge proto.BlockID) error @@ -200,8 +202,14 @@ type State interface { // and state will try to sync and use it in this case. // params are state parameters (see below). // settings are blockchain settings (settings.MainNetSettings, settings.TestNetSettings or custom settings). -func NewState(dataDir string, amend bool, params StateParams, settings *settings.BlockchainSettings) (State, error) { - s, err := newStateManager(dataDir, amend, params, settings) +func NewState( + dataDir string, + amend bool, + params StateParams, + settings *settings.BlockchainSettings, + enableLightNode bool, +) (State, error) { + s, err := newStateManager(dataDir, amend, params, settings, enableLightNode) if err != nil { return nil, errors.Wrap(err, "failed to create new state instance") } diff --git a/pkg/state/appender.go b/pkg/state/appender.go index 829b41aea..9ea3d0a38 100644 --- a/pkg/state/appender.go +++ b/pkg/state/appender.go @@ -158,6 +158,7 @@ type appendBlockParams struct { blockchainHeight proto.Height fixSnapshotsToInitialHash []proto.AtomicSnapshot lastSnapshotStateHash crypto.Digest + optionalSnapshot *proto.BlockSnapshot } func (a *txAppender) orderIsScripted(order proto.Order) (bool, error) { @@ -369,9 +370,7 @@ func (a *txAppender) commitTxApplication( } if !params.validatingUtx { - // TODO: snapshots for miner fee should be generated here, but not saved - // They must be saved in snapshot applier - // Count tx fee. + // Count tx fee. This should not affect transaction execution. It only accumulates miner fee. 
if err := a.blockDiffer.countMinerFee(tx); err != nil { return txSnapshot{}, wrapErr(TxCommitmentError, errors.Errorf("failed to count miner fee: %v", err)) } @@ -606,7 +605,6 @@ func (a *txAppender) appendTx(tx proto.Transaction, params *appendTxParams) (txS // invocationResult may be empty if it was not an Invoke Transaction snapshot, err := a.commitTxApplication(tx, params, invocationResult, applicationRes) - if err != nil { zap.S().Errorf("failed to commit transaction (id %s) after successful validation; this should NEVER happen", base58.Encode(txID)) return txSnapshot{}, err @@ -620,6 +618,111 @@ func (a *txAppender) appendTx(tx proto.Transaction, params *appendTxParams) (txS return snapshot, nil } +func (a *txAppender) applySnapshotInLightNode( + params *appendBlockParams, + blockInfo *proto.BlockInfo, + snapshot proto.BlockSnapshot, + stateHash crypto.Digest, + hasher *txSnapshotHasher, +) (crypto.Digest, error) { + if len(snapshot.TxSnapshots) != len(params.transactions) { // sanity check + return crypto.Digest{}, errors.New("number of tx snapshots doesn't match number of transactions") + } + for i, txs := range snapshot.TxSnapshots { + tx := params.transactions[i] + txID, idErr := tx.GetID(a.settings.AddressSchemeCharacter) + if idErr != nil { + return crypto.Digest{}, idErr + } + if len(txs) == 0 { // sanity check + return crypto.Digest{}, errors.Errorf("snapshot of txID %q cannot be empty", base58.Encode(txID)) + } + txSh, shErr := calculateTxSnapshotStateHash(hasher, txID, blockInfo.Height, stateHash, txs) + if shErr != nil { + return crypto.Digest{}, errors.Wrapf(shErr, "failed to calculate tx snapshot hash for txID %q at height %d", + base58.Encode(txID), blockInfo.Height, + ) + } + stateHash = txSh + regSnapshots := txSnapshot{regular: txs} + if err := regSnapshots.Apply(a.txHandler.sa, tx, false); err != nil { + return crypto.Digest{}, errors.Wrap(err, "failed to apply tx snapshot") + } + if fErr := a.blockDiffer.countMinerFee(tx); fErr != nil { + 
return crypto.Digest{}, errors.Wrapf(fErr, "failed to count miner fee for tx %d", i+1) + } + // TODO: In future we have to store the list of affected addresses for each transaction here. + } + return stateHash, nil +} + +func (a *txAppender) appendTxs( + params *appendBlockParams, + info *checkerInfo, + blockInfo *proto.BlockInfo, + stateHash crypto.Digest, + hasher *txSnapshotHasher, +) (proto.BlockSnapshot, crypto.Digest, error) { + blockV5Activated, err := a.stor.features.newestIsActivated(int16(settings.BlockV5)) + if err != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, err + } + consensusImprovementsActivated, err := a.stor.features.newestIsActivated(int16(settings.ConsensusImprovements)) + if err != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, err + } + blockRewardDistributionActivated, err := a.stor.features.newestIsActivated(int16(settings.BlockRewardDistribution)) + if err != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, err + } + lightNodeActivated, err := a.stor.features.newestIsActivated(int16(settings.LightNode)) + if err != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, err + } + // Check and append transactions. 
+ var bs proto.BlockSnapshot + appendTxArgs := &appendTxParams{ + chans: params.chans, + checkerInfo: info, + blockInfo: blockInfo, + block: params.block, + acceptFailed: blockV5Activated, + blockV5Activated: blockV5Activated, + rideV5Activated: info.rideV5Activated, + rideV6Activated: info.rideV6Activated, + consensusImprovementsActivated: consensusImprovementsActivated, + blockRewardDistributionActivated: blockRewardDistributionActivated, + lightNodeActivated: lightNodeActivated, + validatingUtx: false, + currentMinerPK: params.block.GeneratorPublicKey, + } + for _, tx := range params.transactions { + txSnapshots, errAppendTx := a.appendTx(tx, appendTxArgs) + if errAppendTx != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, errAppendTx + } + bs.AppendTxSnapshot(txSnapshots.regular) + + txID, idErr := tx.GetID(a.settings.AddressSchemeCharacter) + if idErr != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, idErr + } + + if len(txSnapshots.regular) == 0 { // sanity check + return proto.BlockSnapshot{}, crypto.Digest{}, + errors.Errorf("snapshot of txID %q cannot be empty", base58.Encode(txID)) + } + txSh, shErr := calculateTxSnapshotStateHash(hasher, txID, blockInfo.Height, stateHash, txSnapshots.regular) + if shErr != nil { + return proto.BlockSnapshot{}, crypto.Digest{}, errors.Wrapf(shErr, + "failed to calculate tx snapshot hash for txID %q at height %d", base58.Encode(txID), blockInfo.Height, + ) + } + stateHash = txSh // update stateHash in order to accumulate state hashes into block snapshot hash + } + return bs, stateHash, nil +} + func calculateInitialSnapshotStateHash( h *txSnapshotHasher, blockHasParent bool, @@ -667,18 +770,78 @@ func (a *txAppender) appendBlock(params *appendBlockParams) error { a.sc.resetComplexity() a.totalScriptsRuns = 0 }() - rideV5Activated, err := a.stor.features.newestIsActivated(int16(settings.RideV5)) + checkerInfo, err := a.createCheckerInfo(params) if err != nil { return err } - rideV6Activated, err := 
a.stor.features.newestIsActivated(int16(settings.RideV6)) + hasParent := params.parent != nil + if hasParent { + checkerInfo.parentTimestamp = params.parent.Timestamp + } + + snapshotApplierInfo := newBlockSnapshotsApplierInfo(checkerInfo, a.settings.AddressSchemeCharacter) + a.txHandler.sa.SetApplierInfo(snapshotApplierInfo) + + blockInfo, err := a.currentBlockInfo() + if err != nil { + return errors.Wrapf(err, "failed to get current block info, blockchain height is %d", params.blockchainHeight) + } + currentBlockHeight := blockInfo.Height + hasher, err := newTxSnapshotHasherDefault() + if err != nil { + return errors.Wrapf(err, "failed to create tx snapshot default hasher, block height is %d", currentBlockHeight) + } + defer hasher.Release() + + stateHash, err := a.createInitialDiffAndStateHash(params, hasParent, blockInfo, hasher) if err != nil { return err } - blockRewardDistribution, err := a.stor.features.newestIsActivated(int16(settings.BlockRewardDistribution)) + var blockSnapshot proto.BlockSnapshot + if params.optionalSnapshot != nil { + blockSnapshot = *params.optionalSnapshot + stateHash, err = a.applySnapshotInLightNode(params, blockInfo, blockSnapshot, stateHash, hasher) + } else { + blockSnapshot, stateHash, err = a.appendTxs(params, checkerInfo, blockInfo, stateHash, hasher) + } if err != nil { return err } + // check whether the calculated snapshot state hash equals with the provided one + if blockStateHash, present := params.block.GetStateHash(); present && blockStateHash != stateHash { + return errors.Wrapf(errBlockSnapshotStateHashMismatch, "state hash mismatch; provided '%s', caluclated '%s'", + blockStateHash.String(), stateHash.String(), + ) + } + + blockID := params.block.BlockID() + if ssErr := a.stor.snapshots.saveSnapshots(blockID, currentBlockHeight, blockSnapshot); ssErr != nil { + return ssErr + } + // clean up legacy state hash records with zero diffs + a.txHandler.sa.filterZeroDiffsSHOut(blockID) + + if shErr := 
a.stor.stateHashes.saveSnapshotStateHash(stateHash, currentBlockHeight, blockID); shErr != nil { + return errors.Wrapf(shErr, "failed to save block shasnpt hash at height %d", currentBlockHeight) + } + // Save fee distribution of this block. + // This will be needed for createMinerAndRewardDiff() of next block due to NG. + return a.blockDiffer.saveCurFeeDistr(params.block) +} + +func (a *txAppender) createCheckerInfo(params *appendBlockParams) (*checkerInfo, error) { + rideV5Activated, err := a.stor.features.newestIsActivated(int16(settings.RideV5)) + if err != nil { + return nil, err + } + rideV6Activated, err := a.stor.features.newestIsActivated(int16(settings.RideV6)) + if err != nil { + return nil, err + } + blockRewardDistribution, err := a.stor.features.newestIsActivated(int16(settings.BlockRewardDistribution)) + if err != nil { + return nil, err + } checkerInfo := &checkerInfo{ currentTimestamp: params.block.Timestamp, blockID: params.block.BlockID(), @@ -688,54 +851,44 @@ func (a *txAppender) appendBlock(params *appendBlockParams) error { rideV6Activated: rideV6Activated, blockRewardDistribution: blockRewardDistribution, } - hasParent := params.parent != nil - if hasParent { - checkerInfo.parentTimestamp = params.parent.Timestamp - } + return checkerInfo, nil +} - // Set new applier info with applying block context. - snapshotApplierInfo := newBlockSnapshotsApplierInfo(checkerInfo, a.settings.AddressSchemeCharacter) - a.txHandler.sa.SetApplierInfo(snapshotApplierInfo) +func (a *txAppender) createInitialDiffAndStateHash( + params *appendBlockParams, + hasParent bool, + blockInfo *proto.BlockInfo, + hasher *txSnapshotHasher, +) (crypto.Digest, error) { // Create miner balance diff. // This adds 60% of prev block fees as very first balance diff of the current block in case NG is activated. // Before NG activation it adds all transactions fees to the miner's balance. 
minerAndRewardDiff, err := a.blockDiffer.createMinerAndRewardDiff(params.block, hasParent, params.transactions) if err != nil { - return err + return crypto.Digest{}, err } // create the initial snapshot initialSnapshot, err := a.txHandler.tp.createInitialBlockSnapshot(minerAndRewardDiff.balancesChanges()) if err != nil { - return errors.Wrap(err, "failed to create initial snapshot") - } - - blockInfo, err := a.currentBlockInfo() - if err != nil { - return errors.Wrapf(err, "failed to get current block info, blockchain height is %d", params.blockchainHeight) + return crypto.Digest{}, errors.Wrap(err, "failed to create initial snapshot") } currentBlockHeight := blockInfo.Height - hasher, err := newTxSnapshotHasherDefault() - if err != nil { - return errors.Wrapf(err, "failed to create tx snapshot default hasher, block height is %d", currentBlockHeight) - } - defer hasher.Release() - // Save miner diff first (for validation) if err = a.diffStor.saveTxDiff(minerAndRewardDiff); err != nil { - return err + return crypto.Digest{}, err } err = a.diffApplier.validateBalancesChanges(minerAndRewardDiff.balancesChanges()) if err != nil { - return errors.Wrap(err, "failed to validate miner reward changes") + return crypto.Digest{}, errors.Wrap(err, "failed to validate miner reward changes") } a.diffStor.reset() err = initialSnapshot.ApplyInitialSnapshot(a.txHandler.sa) if err != nil { - return errors.Wrap(err, "failed to apply an initial snapshot") + return crypto.Digest{}, errors.Wrap(err, "failed to apply an initial snapshot") } // hash block initial snapshot and fix snapshot in the context of the applying block @@ -752,88 +905,11 @@ func (a *txAppender) appendBlock(params *appendBlockParams) error { snapshotsToHash, ) if err != nil { - return errors.Wrapf(err, "failed to calculate initial snapshot hash for blockID %q at height %d", + return crypto.Digest{}, errors.Wrapf(err, "failed to calculate initial snapshot hash for blockID %q at height %d", params.block.BlockID(), 
currentBlockHeight, ) } - - blockV5Activated, err := a.stor.features.newestIsActivated(int16(settings.BlockV5)) - if err != nil { - return err - } - consensusImprovementsActivated, err := a.stor.features.newestIsActivated(int16(settings.ConsensusImprovements)) - if err != nil { - return err - } - blockRewardDistributionActivated, err := a.stor.features.newestIsActivated(int16(settings.BlockRewardDistribution)) - if err != nil { - return err - } - lightNodeActivated, err := a.stor.features.newestIsActivated(int16(settings.LightNode)) - if err != nil { - return err - } - // Check and append transactions. - var bs proto.BlockSnapshot - - for _, tx := range params.transactions { - appendTxArgs := &appendTxParams{ - chans: params.chans, - checkerInfo: checkerInfo, - blockInfo: blockInfo, - block: params.block, - acceptFailed: blockV5Activated, - blockV5Activated: blockV5Activated, - rideV5Activated: rideV5Activated, - rideV6Activated: rideV6Activated, - consensusImprovementsActivated: consensusImprovementsActivated, - blockRewardDistributionActivated: blockRewardDistributionActivated, - lightNodeActivated: lightNodeActivated, - validatingUtx: false, - currentMinerPK: params.block.GeneratorPublicKey, - } - txSnapshots, errAppendTx := a.appendTx(tx, appendTxArgs) - if errAppendTx != nil { - return errAppendTx - } - bs.AppendTxSnapshot(txSnapshots.regular) - txID, idErr := tx.GetID(a.settings.AddressSchemeCharacter) - if idErr != nil { - return idErr - } - - if len(txSnapshots.regular) == 0 { // sanity check - return errors.Errorf("snapshot of txID %q cannot be empty", base58.Encode(txID)) - } - txSh, shErr := calculateTxSnapshotStateHash(hasher, txID, currentBlockHeight, stateHash, txSnapshots.regular) - if shErr != nil { - return errors.Wrapf(shErr, "failed to calculate tx snapshot hash for txID %q at height %d", - base58.Encode(txID), currentBlockHeight, - ) - } - stateHash = txSh // update stateHash in order to accumulate state hashes into block snapshot hash - } - // 
check whether the calculated snapshot state hash equals with the provided one - if blockStateHash, present := params.block.GetStateHash(); present && blockStateHash != stateHash { - return errors.Wrapf(errBlockSnapshotStateHashMismatch, "state hash mismatch; provided '%s', caluclated '%s'", - blockStateHash.String(), stateHash.String(), - ) - } - blockID := params.block.BlockID() - if ssErr := a.stor.snapshots.saveSnapshots(blockID, currentBlockHeight, bs); ssErr != nil { - return errors.Wrapf(ssErr, "failed to save block snapshots at height %d", currentBlockHeight) - } - - // clean up legacy state hash records with zero diffs - a.txHandler.sa.filterZeroDiffsSHOut(blockID) - // TODO: check snapshot hash with the block snapshot hash if it exists - if shErr := a.stor.stateHashes.saveSnapshotStateHash(stateHash, currentBlockHeight, blockID); shErr != nil { - return errors.Wrapf(shErr, "failed to save block snapshot hash at height %d", currentBlockHeight) - } - - // Save fee distribution of this block. - // This will be needed for createMinerAndRewardDiff() of next block due to NG. - return a.blockDiffer.saveCurFeeDistr(params.block) + return stateHash, nil } // used only in tests now. All diffs are applied in snapshotApplier. @@ -1021,8 +1097,9 @@ func (a *txAppender) handleFallible( case proto.ExchangeTransaction: applicationRes, err := a.handleExchange(tx, info) return nil, applicationRes, err + default: + return nil, nil, errors.Errorf("transaction (%T) is not fallible", tx) } - return nil, nil, errors.New("transaction is not fallible") } // For UTX validation. 
diff --git a/pkg/state/headers_validation_test.go b/pkg/state/headers_validation_test.go index 75c343e14..f40c3fe02 100644 --- a/pkg/state/headers_validation_test.go +++ b/pkg/state/headers_validation_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/wavesplatform/gowaves/pkg/crypto" "github.com/wavesplatform/gowaves/pkg/keyvalue" "github.com/wavesplatform/gowaves/pkg/libs/ntptime" @@ -34,8 +35,8 @@ func applyBlocks(t *testing.T, blocks []proto.Block, st State, scheme proto.Sche if blocksIndex != blocksBatchSize && height != blocksNumber { continue } - if err := st.AddBlocks(blocksBatch[:blocksIndex]); err != nil { - return err + if aErr := st.AddBlocks(blocksBatch[:blocksIndex]); aErr != nil { + return aErr } blocksIndex = 0 } diff --git a/pkg/state/invoke_applier_test.go b/pkg/state/invoke_applier_test.go index 1c9b6400a..2989ff51e 100644 --- a/pkg/state/invoke_applier_test.go +++ b/pkg/state/invoke_applier_test.go @@ -35,7 +35,7 @@ type invokeApplierTestObjects struct { } func createInvokeApplierTestObjects(t *testing.T) *invokeApplierTestObjects { - state, err := newStateManager(t.TempDir(), true, DefaultTestingStateParams(), settings.MainNetSettings) + state, err := newStateManager(t.TempDir(), true, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") to := &invokeApplierTestObjects{state} randGenesisBlockID := genRandBlockId(t) diff --git a/pkg/state/snapshot_generator_internal_test.go b/pkg/state/snapshot_generator_internal_test.go index f06cf9154..f1222bb9d 100644 --- a/pkg/state/snapshot_generator_internal_test.go +++ b/pkg/state/snapshot_generator_internal_test.go @@ -121,7 +121,7 @@ func TestDefaultTransferWavesAndAssetSnapshot(t *testing.T) { to.stor.flush(t) } -// TODO send only txBalanceChanges to perfomer +// TODO send only txBalanceChanges to performer func TestDefaultIssueTransactionSnapshot(t *testing.T) { to := 
createSnapshotGeneratorTestObjects(t) diff --git a/pkg/state/snapshot_hasher_internal_test.go b/pkg/state/snapshot_hasher_internal_test.go index 71b18c6d9..937af4983 100644 --- a/pkg/state/snapshot_hasher_internal_test.go +++ b/pkg/state/snapshot_hasher_internal_test.go @@ -139,7 +139,7 @@ func TestTxSnapshotHasher(t *testing.T) { }, { testCaseName: "asset_volume_two's_complement", - pbInBase64: "MicKIOfYm9p3M/NiYXCvwCU3ho5eVFpwE5iekWev4QXhZMvuEAEaAcg=", + pbInBase64: "MigKIOfYm9p3M/NiYXCvwCU3ho5eVFpwE5iekWev4QXhZMvuEAEaAgDI", prevStateHashHex: "6502773294f32cc1702d374ffc1e67ee278cd63c5f00432f80f64a689fcb17f9", expectedStateHashHex: "b5f7e36556cb0d9a72bc9612be017a3cf174cfcb059d86c91621bfe7e8b74ff1", transactionIDBase58: "Gc2kPdPb1qrCPMy1Ga6SD5PDs2Equa6aazxhKjtDzrv1", // valid txID from testnet diff --git a/pkg/state/state.go b/pkg/state/state.go index c04059e4d..4584865c2 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -35,8 +35,9 @@ const ( var empty struct{} func wrapErr(stateErrorType ErrorType, err error) error { - switch err.(type) { - case StateError: + var stateError StateError + switch { + case errors.As(err, &stateError): return err default: return NewStateError(stateErrorType, err) @@ -266,6 +267,7 @@ type newBlocks struct { binary bool binBlocks [][]byte blocks []*proto.Block + snapshots []*proto.BlockSnapshot curPos int rw *blockReadWriter @@ -304,6 +306,28 @@ func (n *newBlocks) setNew(blocks []*proto.Block) { n.binary = false } +func (n *newBlocks) setNewWithSnapshots(blocks []*proto.Block, snapshots []*proto.BlockSnapshot) error { + if len(blocks) != len(snapshots) { + return errors.New("the numbers of snapshots doesn't match the number of blocks") + } + n.reset() + n.blocks = blocks + n.snapshots = snapshots + n.binary = false + return nil +} + +func (n *newBlocks) setNewBinaryWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error { + if len(blocks) != len(snapshots) { + return errors.New("the numbers of snapshots doesn't 
match the number of blocks") + } + n.reset() + n.binBlocks = blocks + n.snapshots = snapshots + n.binary = true + return nil +} + func (n *newBlocks) next() bool { n.curPos++ if n.binary { @@ -326,27 +350,36 @@ func (n *newBlocks) unmarshalBlock(block *proto.Block, blockBytes []byte) error return nil } -func (n *newBlocks) current() (*proto.Block, error) { +func (n *newBlocks) current() (*proto.Block, *proto.BlockSnapshot, error) { if !n.binary { if n.curPos > len(n.blocks) || n.curPos < 1 { - return nil, errors.New("bad current position") + return nil, nil, errors.New("bad current position") + } + var ( + pos = n.curPos - 1 + block = n.blocks[pos] + optionalSnapshot *proto.BlockSnapshot + ) + if len(n.snapshots) == len(n.blocks) { // return block with snapshot if it is set + optionalSnapshot = n.snapshots[pos] } - return n.blocks[n.curPos-1], nil + return block, optionalSnapshot, nil } if n.curPos > len(n.binBlocks) || n.curPos < 1 { - return nil, errors.New("bad current position") + return nil, nil, errors.New("bad current position") } blockBytes := n.binBlocks[n.curPos-1] b := &proto.Block{} if err := n.unmarshalBlock(b, blockBytes); err != nil { - return nil, err + return nil, nil, err } - return b, nil + return b, nil, nil } func (n *newBlocks) reset() { n.binBlocks = nil n.blocks = nil + n.snapshots = nil n.curPos = 0 } @@ -374,11 +407,81 @@ type stateManager struct { verificationGoroutinesNum int newBlocks *newBlocks + + enableLightNode bool } -func newStateManager(dataDir string, amend bool, params StateParams, settings *settings.BlockchainSettings) (*stateManager, error) { - err := validateSettings(settings) +func initDatabase( + dataDir, blockStorageDir string, + amend bool, + params StateParams, +) (*keyvalue.KeyVal, keyvalue.Batch, *stateDB, bool, error) { + dbDir := filepath.Join(dataDir, keyvalueDir) + zap.S().Info("Initializing state database, will take up to few minutes...") + 
params.DbParams.BloomFilterParams.Store.WithPath(filepath.Join(blockStorageDir, "bloom")) + db, err := keyvalue.NewKeyVal(dbDir, params.DbParams) + if err != nil { + return nil, nil, nil, false, wrapErr(Other, errors.Wrap(err, "failed to create db")) + } + zap.S().Info("Finished initializing database") + dbBatch, err := db.NewBatch() if err != nil { + return nil, nil, nil, false, wrapErr(Other, errors.Wrap(err, "failed to create db batch")) + } + sdb, err := newStateDB(db, dbBatch, params) + if err != nil { + return nil, nil, nil, false, wrapErr(Other, errors.Wrap(err, "failed to create stateDB")) + } + if cErr := checkCompatibility(sdb, params); cErr != nil { + return nil, nil, nil, false, wrapErr(IncompatibilityError, cErr) + } + handledAmend, err := handleAmendFlag(sdb, amend) + if err != nil { + return nil, nil, nil, false, wrapErr(Other, errors.Wrap(err, "failed to handle amend flag")) + } + return db, dbBatch, sdb, handledAmend, nil +} + +func initGenesis(state *stateManager, height uint64, settings *settings.BlockchainSettings) error { + state.setGenesisBlock(&settings.Genesis) + // 0 state height means that no blocks are found in state, so blockchain history is empty and we have to add genesis + if height == 0 { + // Assign unique block number for this block ID, add this number to the list of valid blocks + if err := state.stateDB.addBlock(settings.Genesis.BlockID()); err != nil { + return err + } + if err := state.addGenesisBlock(); err != nil { + return errors.Wrap(err, "failed to apply/save genesis") + } + // We apply pre-activated features after genesis block, so they aren't active in genesis itself + if err := state.applyPreActivatedFeatures(settings.PreactivatedFeatures, settings.Genesis.BlockID()); err != nil { + return errors.Wrap(err, "failed to apply pre-activated features") + } + } + + // check the correct blockchain is being loaded + genesis, err := state.BlockByHeight(1) + if err != nil { + return errors.Wrap(err, "failed to get genesis block 
from state") + } + err = settings.Genesis.GenerateBlockID(settings.AddressSchemeCharacter) + if err != nil { + return errors.Wrap(err, "failed to generate genesis block id from config") + } + if !bytes.Equal(genesis.ID.Bytes(), settings.Genesis.ID.Bytes()) { + return errors.New("genesis blocks from state and config mismatch") + } + return nil +} + +func newStateManager( + dataDir string, + amend bool, + params StateParams, + settings *settings.BlockchainSettings, + enableLightNode bool, +) (*stateManager, error) { + if err := validateSettings(settings); err != nil { return nil, err } if _, err := os.Stat(dataDir); errors.Is(err, fs.ErrNotExist) { @@ -393,42 +496,23 @@ func newStateManager(dataDir string, amend bool, params StateParams, settings *s } } // Initialize database. - dbDir := filepath.Join(dataDir, keyvalueDir) - zap.S().Info("Initializing state database, will take up to few minutes...") - params.DbParams.BloomFilterParams.Store.WithPath(filepath.Join(blockStorageDir, "bloom")) - db, err := keyvalue.NewKeyVal(dbDir, params.DbParams) - if err != nil { - return nil, wrapErr(Other, errors.Wrap(err, "failed to create db")) - } - zap.S().Info("Finished initializing database") - dbBatch, err := db.NewBatch() - if err != nil { - return nil, wrapErr(Other, errors.Wrap(err, "failed to create db batch")) - } - stateDB, err := newStateDB(db, dbBatch, params) - if err != nil { - return nil, wrapErr(Other, errors.Wrap(err, "failed to create stateDB")) - } - if err := checkCompatibility(stateDB, params); err != nil { - return nil, wrapErr(IncompatibilityError, err) - } - handledAmend, err := handleAmendFlag(stateDB, amend) + db, dbBatch, sdb, handledAmend, err := initDatabase(dataDir, blockStorageDir, amend, params) if err != nil { - return nil, wrapErr(Other, errors.Wrap(err, "failed to handle amend flag")) + return nil, err } // rw is storage for blocks. 
rw, err := newBlockReadWriter( blockStorageDir, params.OffsetLen, params.HeaderOffsetLen, - stateDB, + sdb, settings.AddressSchemeCharacter, ) if err != nil { return nil, wrapErr(Other, errors.Errorf("failed to create block storage: %v", err)) } - stateDB.setRw(rw) - hs, err := newHistoryStorage(db, dbBatch, stateDB, handledAmend) + sdb.setRw(rw) + hs, err := newHistoryStorage(db, dbBatch, sdb, handledAmend) if err != nil { return nil, wrapErr(Other, errors.Errorf("failed to create history storage: %v", err)) } @@ -443,33 +527,25 @@ func newStateManager(dataDir string, amend bool, params StateParams, settings *s maxFileSize: MaxAddressTransactionsFileSize, providesData: params.ProvideExtendedApi, } - atx, err := newAddressTransactions( - db, - stateDB, - rw, - atxParams, - handledAmend, - ) + atx, err := newAddressTransactions(db, sdb, rw, atxParams, handledAmend) if err != nil { return nil, wrapErr(Other, errors.Errorf("failed to create address transactions storage: %v", err)) } state := &stateManager{ mu: &sync.RWMutex{}, - stateDB: stateDB, + stateDB: sdb, stor: stor, rw: rw, settings: settings, atx: atx, verificationGoroutinesNum: params.VerificationGoroutinesNum, newBlocks: newNewBlocks(rw, settings), + enableLightNode: enableLightNode, } // Set fields which depend on state. // Consensus validator is needed to check block headers. 
- snapshotApplier := newBlockSnapshotsApplier( - nil, - newSnapshotApplierStorages(stor, rw), - ) - appender, err := newTxAppender(state, rw, stor, settings, stateDB, atx, &snapshotApplier) + snapshotApplier := newBlockSnapshotsApplier(nil, newSnapshotApplierStorages(stor, rw)) + appender, err := newTxAppender(state, rw, stor, settings, sdb, atx, &snapshotApplier) if err != nil { return nil, wrapErr(Other, err) } @@ -480,35 +556,10 @@ func newStateManager(dataDir string, amend bool, params StateParams, settings *s if err != nil { return nil, err } - state.setGenesisBlock(&settings.Genesis) - // 0 state height means that no blocks are found in state, so blockchain history is empty and we have to add genesis - if height == 0 { - // Assign unique block number for this block ID, add this number to the list of valid blocks - if err := state.stateDB.addBlock(settings.Genesis.BlockID()); err != nil { - return nil, err - } - if err := state.addGenesisBlock(); err != nil { - return nil, errors.Wrap(err, "failed to apply/save genesis") - } - // We apply pre-activated features after genesis block, so they aren't active in genesis itself - if err := state.applyPreActivatedFeatures(settings.PreactivatedFeatures, settings.Genesis.BlockID()); err != nil { - return nil, errors.Wrap(err, "failed to apply pre-activated features") - } - } - // check the correct blockchain is being loaded - genesis, err := state.BlockByHeight(1) - if err != nil { - return nil, errors.Wrap(err, "failed to get genesis block from state") + if gErr := initGenesis(state, height, settings); gErr != nil { + return nil, gErr } - err = settings.Genesis.GenerateBlockID(settings.AddressSchemeCharacter) - if err != nil { - return nil, errors.Wrap(err, "failed to generate genesis block id from config") - } - if !bytes.Equal(genesis.ID.Bytes(), settings.Genesis.ID.Bytes()) { - return nil, errors.Errorf("genesis blocks from state and config mismatch") - } - if err := state.loadLastBlock(); err != nil { return nil, 
wrapErr(RetrievalError, err) } @@ -598,7 +649,7 @@ func (s *stateManager) addGenesisBlock() error { chans := launchVerifier(ctx, s.verificationGoroutinesNum, s.settings.AddressSchemeCharacter) - if err := s.addNewBlock(s.genesis, nil, chans, 0, nil, initSH); err != nil { + if err := s.addNewBlock(s.genesis, nil, chans, 0, nil, nil, initSH); err != nil { return err } if err := s.stor.hitSources.appendBlockHitSource(s.genesis, 1, s.genesis.GenSignature); err != nil { @@ -825,7 +876,7 @@ func (s *stateManager) newestAssetBalance(addr proto.AddressID, asset proto.Asse // Retrieve the latest balance diff as for the moment of this function call. key := assetBalanceKey{address: addr, asset: asset} diff, err := s.appender.diffStorInvoke.latestDiffByKey(string(key.bytes())) - if err == errNotFound { + if errors.Is(err, errNotFound) { // If there is no diff, old balance is the newest. return balance, nil } else if err != nil { @@ -848,7 +899,7 @@ func (s *stateManager) newestWavesBalanceProfile(addr proto.AddressID) (balanceP // Retrieve the latest balance diff as for the moment of this function call. key := wavesBalanceKey{address: addr} diff, err := s.appender.diffStorInvoke.latestDiffByKey(string(key.bytes())) - if err == errNotFound { + if errors.Is(err, errNotFound) { // If there is no diff, old balance is the newest. return profile, nil } else if err != nil { @@ -1072,20 +1123,12 @@ func (s *stateManager) addNewBlock( block, parent *proto.Block, chans *verifierChans, blockchainHeight uint64, + optionalSnapshot *proto.BlockSnapshot, fixSnapshotsToInitialHash []proto.AtomicSnapshot, lastSnapshotStateHash crypto.Digest, ) error { blockHeight := blockchainHeight + 1 - // Add score. - if err := s.stor.scores.appendBlockScore(block, blockHeight); err != nil { - return err - } - // Indicate new block for storage. - if err := s.rw.startBlock(block.BlockID()); err != nil { - return err - } - // Save block header to block storage. 
- if err := s.rw.writeBlockHeader(&block.BlockHeader); err != nil { + if err := s.beforeAppendBlock(block, blockHeight); err != nil { return err } transactions := block.Transactions @@ -1104,11 +1147,29 @@ func (s *stateManager) addNewBlock( blockchainHeight: blockchainHeight, fixSnapshotsToInitialHash: fixSnapshotsToInitialHash, lastSnapshotStateHash: lastSnapshotStateHash, + optionalSnapshot: optionalSnapshot, } // Check and perform block's transactions, create balance diffs, write transactions to storage. if err := s.appender.appendBlock(params); err != nil { return err } + return s.afterAppendBlock(block, blockHeight) +} + +func (s *stateManager) beforeAppendBlock(block *proto.Block, blockHeight proto.Height) error { + // Add score. + if err := s.stor.scores.appendBlockScore(block, blockHeight); err != nil { + return err + } + // Indicate new block for storage. + if err := s.rw.startBlock(block.BlockID()); err != nil { + return err + } + // Save block header to block storage. + return s.rw.writeBlockHeader(&block.BlockHeader) +} + +func (s *stateManager) afterAppendBlock(block *proto.Block, blockHeight proto.Height) error { // Let block storage know that the current block is over. 
if err := s.rw.finishBlock(block.BlockID()); err != nil { return err @@ -1189,9 +1250,41 @@ func (s *stateManager) AddBlocks(blockBytes [][]byte) error { return nil } -func (s *stateManager) AddDeserializedBlocks(blocks []*proto.Block) (*proto.Block, error) { +func (s *stateManager) AddBlocksWithSnapshots(blockBytes [][]byte, snapshots []*proto.BlockSnapshot) error { + if err := s.newBlocks.setNewBinaryWithSnapshots(blockBytes, snapshots); err != nil { + return errors.Wrap(err, "failed to set new blocks with snapshots") + } + if _, err := s.addBlocks(); err != nil { + if snErr := s.rw.syncWithDb(); snErr != nil { + zap.S().Fatalf("Failed to add blocks and can not sync block storage with the database after failure: %v", snErr) + } + return err + } + return nil +} + +func (s *stateManager) AddDeserializedBlocks( + blocks []*proto.Block, +) (*proto.Block, error) { s.newBlocks.setNew(blocks) lastBlock, err := s.addBlocks() + if err != nil { + if err = s.rw.syncWithDb(); err != nil { + zap.S().Fatalf("Failed to add blocks and can not sync block storage with the database after failure: %v", err) + } + return nil, err + } + return lastBlock, nil +} + +func (s *stateManager) AddDeserializedBlocksWithSnapshots( + blocks []*proto.Block, + snapshots []*proto.BlockSnapshot, +) (*proto.Block, error) { + if err := s.newBlocks.setNewWithSnapshots(blocks, snapshots); err != nil { + return nil, errors.Wrap(err, "failed to set new blocks with snapshots") + } + lastBlock, err := s.addBlocks() if err != nil { if err := s.rw.syncWithDb(); err != nil { zap.S().Fatalf("Failed to add blocks and can not sync block storage with the database after failure: %v", err) @@ -1391,11 +1484,11 @@ func (s *stateManager) blockchainHeightAction(blockchainHeight uint64, lastBlock } } - needToRecalc, err := s.needToRecalculateVotesAfterCappedRewardActivationInVotingPeriod(blockchainHeight) + needToRecalculate, err := s.needToRecalculateVotesAfterCappedRewardActivationInVotingPeriod(blockchainHeight) if 
err != nil { return err } - if needToRecalc { // one time action + if needToRecalculate { // one time action if err := s.recalculateVotesAfterCappedRewardActivationInVotingPeriod(blockchainHeight, lastBlock); err != nil { return errors.Wrap(err, "failed to recalculate monetary policy votes") } @@ -1598,45 +1691,20 @@ func (s *stateManager) addBlocks() (*proto.Block, error) { pos := 0 for s.newBlocks.next() { blockchainCurHeight := height + uint64(pos) - block, err := s.newBlocks.current() - if err != nil { - return nil, wrapErr(DeserializationError, err) - } - // Assign unique block number for this block ID, add this number to the list of valid blocks. - if blErr := s.stateDB.addBlock(block.BlockID()); blErr != nil { - return nil, wrapErr(ModificationError, blErr) - } - // At some blockchain heights specific logic is performed. - // This includes voting for features, block rewards and so on. - // It also disables stolen aliases. - if err := s.blockchainHeightAction(blockchainCurHeight, lastAppliedBlock.BlockID(), block.BlockID()); err != nil { - return nil, wrapErr(ModificationError, err) + block, optionalSnapshot, errCurBlock := s.newBlocks.current() + if errCurBlock != nil { + return nil, wrapErr(DeserializationError, errCurBlock) } - if vhErr := s.cv.ValidateHeaderBeforeBlockApplying(&block.BlockHeader, blockchainCurHeight); vhErr != nil { - return nil, vhErr - } - // Send block for signature verification, which works in separate goroutine. 
- task := &verifyTask{ - taskType: verifyBlock, - parentID: lastAppliedBlock.BlockID(), - block: block, - } - if err := chans.trySend(task); err != nil { - return nil, err - } - hs, err := s.cv.GenerateHitSource(blockchainCurHeight, block.BlockHeader) - if err != nil { + + if err = s.beforeAddingBlock(block, lastAppliedBlock, blockchainCurHeight, chans); err != nil { return nil, err } - sh, err := s.stor.stateHashes.newestSnapshotStateHash(blockchainCurHeight) - if err != nil { - return nil, errors.Wrapf(err, "failed to get newest snapshot state hash for height %d", + sh, errSh := s.stor.stateHashes.newestSnapshotStateHash(blockchainCurHeight) + if errSh != nil { + return nil, errors.Wrapf(errSh, "failed to get newest snapshot state hash for height %d", blockchainCurHeight, ) } - if err := s.stor.hitSources.appendBlockHitSource(block, blockchainCurHeight+1, hs); err != nil { - return nil, err - } // Generate blockchain fix snapshots for the applying block. fixSnapshots, gbfErr := s.generateBlockchainFix(blockchainCurHeight+1, block.BlockID()) @@ -1653,7 +1721,8 @@ func (s *stateManager) addBlocks() (*proto.Block, error) { fixSnapshotsToInitialHash := fixSnapshots // at the block applying stage fix snapshots are only used for hashing // Save block to storage, check its transactions, create and save balance diffs for its transactions. - addErr := s.addNewBlock(block, lastAppliedBlock, chans, blockchainCurHeight, fixSnapshotsToInitialHash, sh) + addErr := s.addNewBlock( + block, lastAppliedBlock, chans, blockchainCurHeight, optionalSnapshot, fixSnapshotsToInitialHash, sh) if addErr != nil { return nil, addErr } @@ -1687,11 +1756,11 @@ func (s *stateManager) addBlocks() (*proto.Block, error) { return nil, wrapErr(ModificationError, shErr) } // Validate consensus (i.e. that all the new blocks were mined fairly). 
- if err := s.cv.ValidateHeadersBatch(headers[:pos], height); err != nil { + if err = s.cv.ValidateHeadersBatch(headers[:pos], height); err != nil { return nil, wrapErr(ValidationError, err) } // After everything is validated, save all the changes to DB. - if err := s.flush(); err != nil { + if err = s.flush(); err != nil { return nil, wrapErr(ModificationError, err) } zap.S().Infof( @@ -1704,6 +1773,40 @@ func (s *stateManager) addBlocks() (*proto.Block, error) { return lastAppliedBlock, nil } +func (s *stateManager) beforeAddingBlock( + block, lastAppliedBlock *proto.Block, + blockchainCurHeight proto.Height, + chans *verifierChans, +) error { + // Assign unique block number for this block ID, add this number to the list of valid blocks. + if blErr := s.stateDB.addBlock(block.BlockID()); blErr != nil { + return wrapErr(ModificationError, blErr) + } + // At some blockchain heights specific logic is performed. + // This includes voting for features, block rewards and so on. + if err := s.blockchainHeightAction(blockchainCurHeight, lastAppliedBlock.BlockID(), block.BlockID()); err != nil { + return wrapErr(ModificationError, err) + } + if vhErr := s.cv.ValidateHeaderBeforeBlockApplying(&block.BlockHeader, blockchainCurHeight); vhErr != nil { + return vhErr + } + // Send block for signature verification, which works in separate goroutine. 
+ task := &verifyTask{ + taskType: verifyBlock, + parentID: lastAppliedBlock.BlockID(), + block: block, + } + if err := chans.trySend(task); err != nil { + return err + } + hs, err := s.cv.GenerateHitSource(blockchainCurHeight, block.BlockHeader) + if err != nil { + return err + } + + return s.stor.hitSources.appendBlockHitSource(block, blockchainCurHeight+1, hs) +} + func (s *stateManager) checkRollbackHeight(height uint64) error { maxHeight, err := s.Height() if err != nil { diff --git a/pkg/state/state_test.go b/pkg/state/state_test.go index f522d0307..946151efa 100644 --- a/pkg/state/state_test.go +++ b/pkg/state/state_test.go @@ -36,7 +36,7 @@ func bigFromStr(s string) *big.Int { func newTestState(t *testing.T, amend bool, params StateParams, settings *settings.BlockchainSettings) State { dataDir := t.TempDir() - m, err := NewState(dataDir, amend, params, settings) + m, err := NewState(dataDir, amend, params, settings, false) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, m.Close(), "manager.Close() failed") @@ -46,7 +46,7 @@ func newTestState(t *testing.T, amend bool, params StateParams, settings *settin func newTestStateManager(t *testing.T, amend bool, params StateParams, settings *settings.BlockchainSettings) *stateManager { dataDir := t.TempDir() - m, err := newStateManager(dataDir, amend, params, settings) + m, err := newStateManager(dataDir, amend, params, settings, false) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, m.Close(), "manager.Close() failed") @@ -57,7 +57,7 @@ func newTestStateManager(t *testing.T, amend bool, params StateParams, settings func TestHandleAmendFlag(t *testing.T) { dataDir := t.TempDir() // first open with false amend - manager, err := newStateManager(dataDir, false, DefaultTestingStateParams(), settings.MainNetSettings) + manager, err := newStateManager(dataDir, false, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") t.Cleanup(func() 
{ assert.NoError(t, manager.Close(), "manager.Close() failed") @@ -66,18 +66,18 @@ func TestHandleAmendFlag(t *testing.T) { // open with true amend assert.NoError(t, manager.Close(), "manager.Close() failed") - manager, err = newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings) + manager, err = newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") assert.True(t, manager.stor.hs.amend) // open with false amend again. Result amend should be true assert.NoError(t, manager.Close(), "manager.Close() failed") - manager, err = newStateManager(dataDir, false, DefaultTestingStateParams(), settings.MainNetSettings) + manager, err = newStateManager(dataDir, false, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") assert.True(t, manager.stor.hs.amend) // first open with true amend - newManager, err := newStateManager(t.TempDir(), true, DefaultTestingStateParams(), settings.MainNetSettings) + newManager, err := newStateManager(t.TempDir(), true, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") t.Cleanup(func() { assert.NoError(t, newManager.Close(), "newManager.Close() failed") @@ -126,7 +126,11 @@ func TestValidationWithoutBlocks(t *testing.T) { assert.NoError(t, err, "readBlocksFromTestPath() failed") last := blocks[len(blocks)-1] txs := last.Transactions - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, height, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, + height, 1) assert.NoError(t, err, "ApplyFromFile() failed") err = validateTxs(manager, last.Timestamp, txs) assert.NoError(t, err, "validateTxs() failed") @@ -184,24 +188,28 @@ func TestStateRollback(t *testing.T) { } for _, 
tc := range tests { - height, err := manager.Height() - if err != nil { - t.Fatalf("Height(): %v\n", err) + height, hErr := manager.Height() + if hErr != nil { + t.Fatalf("Height(): %v\n", hErr) } if tc.nextHeight > height { - aErr := importer.ApplyFromFile(context.Background(), manager, blocksPath, tc.nextHeight-1, height) - if aErr != nil { + if aErr := importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, + tc.nextHeight-1, height, + ); aErr != nil { t.Fatalf("Failed to import: %v\n", aErr) } } else { - if err := manager.RollbackToHeight(tc.nextHeight); err != nil { - t.Fatalf("Rollback(): %v\n", err) + if rErr := manager.RollbackToHeight(tc.nextHeight); rErr != nil { + t.Fatalf("Rollback(): %v\n", rErr) } } - if err := importer.CheckBalances(manager, tc.balancesPath); err != nil { - t.Fatalf("CheckBalances(): %v\n", err) + if cErr := importer.CheckBalances(manager, tc.balancesPath); cErr != nil { + t.Fatalf("CheckBalances(): %v\n", cErr) } - if err := manager.RollbackToHeight(tc.minRollbackHeight - 1); err == nil { + if rErr := manager.RollbackToHeight(tc.minRollbackHeight - 1); rErr == nil { t.Fatalf("Rollback() did not fail with height less than minimum valid.") } } @@ -226,16 +234,21 @@ func TestStateIntegrated(t *testing.T) { // Test what happens in case of failure: we add blocks starting from wrong height. // State should be rolled back to previous state and ready to use after. 
wrongStartHeight := uint64(100) - ctx := context.Background() - if applyErr := importer.ApplyFromFile(ctx, manager, blocksPath, blocksToImport, wrongStartHeight); applyErr == nil { + if aErr := importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, blocksToImport, wrongStartHeight); aErr == nil { t.Errorf("Import starting from wrong height must fail but it doesn't.") } // Test normal import. - if applyErr := importer.ApplyFromFile(ctx, manager, blocksPath, blocksToImport, 1); applyErr != nil { - t.Fatalf("Failed to import: %v\n", applyErr) + if aErr := importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, blocksToImport, 1); aErr != nil { + t.Fatalf("Failed to import: %v\n", aErr) } - if err := importer.CheckBalances(manager, balancesPath); err != nil { - t.Fatalf("CheckBalances(): %v\n", err) + if cErr := importer.CheckBalances(manager, balancesPath); cErr != nil { + t.Fatalf("CheckBalances(): %v\n", cErr) } score, err := manager.ScoreAtHeight(blocksToImport + 1) if err != nil { @@ -301,7 +314,10 @@ func TestPreactivatedFeatures(t *testing.T) { assert.Equal(t, true, approved) // Apply blocks. height := uint64(75) - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, height, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, height, 1) assert.NoError(t, err, "ApplyFromFile() failed") // Check activation and approval heights. activationHeight, err := manager.ActivationHeight(featureID) @@ -319,7 +335,10 @@ func TestDisallowDuplicateTxIds(t *testing.T) { // Apply blocks. 
height := uint64(75) - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, height, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, height, 1) assert.NoError(t, err, "ApplyFromFile() failed") // Now validate tx with ID which is already in the state. tx := existingGenesisTx(t) @@ -338,7 +357,10 @@ func TestTransactionByID(t *testing.T) { // Apply blocks. height := uint64(75) - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, height, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, height, 1) assert.NoError(t, err, "ApplyFromFile() failed") // Retrieve existing MainNet genesis tx by its ID. @@ -354,7 +376,7 @@ func TestStateManager_TopBlock(t *testing.T) { blocksPath, err := blocksPath() assert.NoError(t, err) dataDir := t.TempDir() - manager, err := newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings) + manager, err := newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") t.Cleanup(func() { @@ -367,7 +389,10 @@ func TestStateManager_TopBlock(t *testing.T) { assert.Equal(t, genesis, manager.TopBlock()) height := proto.Height(100) - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, height-1, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, height-1, 1) assert.NoError(t, err, "ApplyFromFile() failed") correct, err := manager.BlockByHeight(height) @@ -385,7 +410,7 @@ func TestStateManager_TopBlock(t *testing.T) { // Test after closure. 
err = manager.Close() assert.NoError(t, err, "manager.Close() failed") - manager, err = newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings) + manager, err = newStateManager(dataDir, true, DefaultTestingStateParams(), settings.MainNetSettings, false) assert.NoError(t, err, "newStateManager() failed") assert.Equal(t, correct, manager.TopBlock()) } @@ -413,7 +438,10 @@ func TestStateHashAtHeight(t *testing.T) { blocksPath, err := blocksPath() assert.NoError(t, err) - err = importer.ApplyFromFile(context.Background(), manager, blocksPath, 9499, 1) + err = importer.ApplyFromFile( + context.Background(), + importer.ImportParams{Schema: proto.MainNetScheme, BlockchainPath: blocksPath, LightNodeMode: false}, + manager, 9499, 1) assert.NoError(t, err, "ApplyFromFile() failed") stateHash, err := manager.LegacyStateHashAtHeight(9500) assert.NoError(t, err, "LegacyStateHashAtHeight failed") diff --git a/pkg/state/threadsafe_wrapper.go b/pkg/state/threadsafe_wrapper.go index 67a559d1a..ba2b7bed0 100644 --- a/pkg/state/threadsafe_wrapper.go +++ b/pkg/state/threadsafe_wrapper.go @@ -421,7 +421,9 @@ func (a *ThreadSafeWriteWrapper) AddBlock(block []byte) (*proto.Block, error) { return a.s.AddBlock(block) } -func (a *ThreadSafeWriteWrapper) AddDeserializedBlock(block *proto.Block) (*proto.Block, error) { +func (a *ThreadSafeWriteWrapper) AddDeserializedBlock( + block *proto.Block, +) (*proto.Block, error) { a.lock() defer a.unlock() return a.s.AddDeserializedBlock(block) @@ -433,12 +435,29 @@ func (a *ThreadSafeWriteWrapper) AddBlocks(blocks [][]byte) error { return a.s.AddBlocks(blocks) } -func (a *ThreadSafeWriteWrapper) AddDeserializedBlocks(blocks []*proto.Block) (*proto.Block, error) { +func (a *ThreadSafeWriteWrapper) AddBlocksWithSnapshots(blocks [][]byte, snapshots []*proto.BlockSnapshot) error { + a.lock() + defer a.unlock() + return a.s.AddBlocksWithSnapshots(blocks, snapshots) +} + +func (a *ThreadSafeWriteWrapper) AddDeserializedBlocks( 
+ blocks []*proto.Block, +) (*proto.Block, error) { a.lock() defer a.unlock() return a.s.AddDeserializedBlocks(blocks) } +func (a *ThreadSafeWriteWrapper) AddDeserializedBlocksWithSnapshots( + blocks []*proto.Block, + snapshots []*proto.BlockSnapshot, +) (*proto.Block, error) { + a.lock() + defer a.unlock() + return a.s.AddDeserializedBlocksWithSnapshots(blocks, snapshots) +} + func (a *ThreadSafeWriteWrapper) RollbackToHeight(height proto.Height) error { a.lock() defer a.unlock()