diff --git a/Dockerfile b/Dockerfile index d79027fdc9..d575df9a93 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Build Geth in a stock Go builder container FROM golang:1.20.0-alpine3.17 as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache make gcc musl-dev linux-headers git libstdc++-dev COPY . /opt RUN cd /opt && make ronin @@ -23,6 +23,12 @@ ENV NODEKEY '' ENV FORCE_INIT 'true' ENV RONIN_PARAMS '' ENV INIT_FORCE_OVERRIDE_CHAIN_CONFIG 'false' +ENV ENABLE_FAST_FINALITY 'true' +ENV ENABLE_FAST_FINALITY_SIGN 'true' +ENV BLS_PRIVATE_KEY '' +ENV BLS_PASSWORD '' +ENV BLS_AUTO_GENERATE 'false' +ENV BLS_SHOW_PRIVATE_KEY 'false' COPY --from=builder /opt/build/bin/ronin /usr/local/bin/ronin COPY --from=builder /opt/genesis/ ./ diff --git a/Makefile b/Makefile index 1357a4f3a6..e4505f70d3 100644 --- a/Makefile +++ b/Makefile @@ -8,9 +8,10 @@ .PHONY: ronin-darwin ronin-darwin-386 geth-darwin-amd64 .PHONY: ronin-windows ronin-windows-386 geth-windows-amd64 +CFLAGS = "-O -D__BLST_PORTABLE__" GOBIN = ./build/bin GO ?= latest -GORUN = go run +GORUN = CGO_CFLAGS_ALLOW=$(CFLAGS) CGO_CFLAGS=$(CFLAGS) go run RONIN_CONTRACTS_PATH = ../ronin-dpos-contracts RONIN_CONTRACTS_OUTPUT_PATH = ./tmp/contracts GEN_CONTRACTS_OUTPUT_PATH = ./consensus/consortium/generated_contracts @@ -31,6 +32,11 @@ ronin: @echo "Done building." @echo "Run \"$(GOBIN)/ronin\" to launch ronin." +ronin-race-detector: + $(GORUN) build/ci.go install --race ./cmd/ronin + @echo "Done building." + @echo "Run \"$(GOBIN)/ronin\" to launch ronin." + bootnode: $(GORUN) build/ci.go install ./cmd/bootnode @echo "Done building." @@ -66,7 +72,7 @@ clean: devtools: env GOBIN= go install golang.org/x/tools/cmd/stringer@latest - env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest + env GOBIN= go install github.com/kevinburke/go-bindata@latest env GOBIN= go install github.com/fjl/gencodec@latest env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest env GOBIN= go install ./cmd/abigen diff --git a/accounts/bls/keymanager.go b/accounts/bls/keymanager.go new file mode 100644 index 0000000000..ec4c6bba19 --- /dev/null +++ b/accounts/bls/keymanager.go @@ -0,0 +1,342 @@ +package bls + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "sync" + + ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/bls" + "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/google/uuid" + "github.com/pkg/errors" + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" +) + +const ( + // IncorrectPasswordErrMsg defines a common error string representing an EIP-2335 + // keystore password was incorrect. 
+ IncorrectPasswordErrMsg = "invalid checksum" + ImportedKeystoreStatus_IMPORTED = 0 + ImportedKeystoreStatus_DUPLICATE = 1 + ImportedKeystoreStatus_ERROR = 2 +) + +type ImportedKeystoreStatus struct { + Status int32 `json:"status,omitempty"` + Message string `json:"message,omitempty"` +} + +var ( + ErrNoPasswords = errors.New("no passwords provided for keystores") + ErrMismatchedNumPasswords = errors.New("number of passwords does not match number of keystores") +) + +type SignRequest struct { + PublicKey []byte `json:"public_key,omitempty"` + SigningRoot []byte `json:"signing_root,omitempty"` +} + +type KeyManager struct { + lock sync.RWMutex + + pubKeys [][params.BLSPubkeyLength]byte + secKeys map[[params.BLSPubkeyLength]byte]common.SecretKey + + wallet *Wallet + accountsStore *AccountStore +} + +// NewKeyManager instantiates a new local keymanager . +func NewKeyManager(ctx context.Context, wallet *Wallet) (*KeyManager, error) { + k := &KeyManager{ + wallet: wallet, + accountsStore: &AccountStore{}, + } + + if err := k.initializeAccountKeystore(ctx); err != nil { + return nil, errors.Wrap(err, "failed to initialize account store") + } + return k, nil +} + +func (km *KeyManager) initializeAccountKeystore(ctx context.Context) error { + encoded, err := km.wallet.ReadFile(ctx, AccountsKeystoreFileName) + if err != nil && strings.Contains(err.Error(), "no files found") { + // If there are no keys to initialize at all, just exit. + return nil + } else if err != nil { + return errors.Wrapf(err, "could not read keystore file for accounts %s", AccountsKeystoreFileName) + } + keystoreFile := &AccountsKeystoreRepresentation{} + if err := json.Unmarshal(encoded, keystoreFile); err != nil { + return errors.Wrapf(err, "could not decode keystore file for accounts %s", AccountsKeystoreFileName) + } + // We extract the validator signing private key from the keystore + // by utilizing the password and initialize a new BLS secret key from + // its raw bytes. 
+ password := km.wallet.walletPassword + decryptor := keystorev4.New() + enc, err := decryptor.Decrypt(keystoreFile.Crypto, password) + if err != nil && strings.Contains(err.Error(), IncorrectPasswordErrMsg) { + return errors.Wrap(err, "wrong password for wallet entered") + } else if err != nil { + return errors.Wrap(err, "could not decrypt keystore") + } + + store := &AccountStore{} + if err := json.Unmarshal(enc, store); err != nil { + return err + } + if len(store.PublicKeys) != len(store.PrivateKeys) { + return errors.New("unequal number of public keys and private keys") + } + if len(store.PublicKeys) == 0 { + return nil + } + km.accountsStore = store + err = km.initializeKeysCachesFromKeystore() + if err != nil { + return errors.Wrap(err, "failed to initialize keys caches") + } + return err +} + +// Initialize public and secret key caches that are used to speed up the functions +// FetchValidatingPublicKeys and Sign +func (km *KeyManager) initializeKeysCachesFromKeystore() error { + km.lock.Lock() + defer km.lock.Unlock() + count := len(km.accountsStore.PrivateKeys) + km.pubKeys = make([][params.BLSPubkeyLength]byte, count) + km.secKeys = make(map[[params.BLSPubkeyLength]byte]common.SecretKey, count) + for i, publicKey := range km.accountsStore.PublicKeys { + publicKey48 := ethCommon.ToBytes48(publicKey) + km.pubKeys[i] = publicKey48 + secretKey, err := bls.SecretKeyFromBytes(km.accountsStore.PrivateKeys[i]) + if err != nil { + return errors.Wrap(err, "failed to initialize keys caches from account keystore") + } + km.secKeys[publicKey48] = secretKey + } + return nil +} + +// FetchValidatingPublicKeys fetches the list of active public keys from the local account keystores. +func (km *KeyManager) FetchValidatingPublicKeys(ctx context.Context) ([][params.BLSPubkeyLength]byte, error) { + km.lock.RLock() + defer km.lock.RUnlock() + keys := km.pubKeys + result := make([][params.BLSPubkeyLength]byte, len(keys)) + copy(result, keys) + return result, nil +} + +// FetchValidatingSecretKeys fetches the list of active secret keys from the local account keystores. +func (km *KeyManager) FetchValidatingSecretKeys(ctx context.Context) ([][params.BLSSecretKeyLength]byte, error) { + km.lock.RLock() + defer km.lock.RUnlock() + + var secretKeys [][params.BLSSecretKeyLength]byte + for _, secretKey := range km.secKeys { + secretKeys = append(secretKeys, [params.BLSSecretKeyLength]byte(secretKey.Marshal())) + } + + return secretKeys, nil +} + +// Sign signs a message using a validator key. +func (km *KeyManager) Sign(ctx context.Context, req *SignRequest) (bls.Signature, error) { + publicKey := req.PublicKey + if publicKey == nil { + return nil, errors.New("nil public key in request") + } + km.lock.RLock() + secretKey, ok := km.secKeys[ethCommon.ToBytes48(publicKey)] + km.lock.RUnlock() + if !ok { + return nil, errors.New("no signing key found in keys cache") + } + return secretKey.Sign(req.SigningRoot), nil +} + +// ImportKeystores into the local keymanager from an external source. 
+func (km *KeyManager) ImportKeystores( + ctx context.Context, + keystores []*Keystore, + passwords []string, +) ([]*ImportedKeystoreStatus, error) { + if len(passwords) == 0 { + return nil, ErrNoPasswords + } + if len(passwords) != len(keystores) { + return nil, ErrMismatchedNumPasswords + } + decryptor := keystorev4.New() + keys := map[string]string{} + statuses := make([]*ImportedKeystoreStatus, len(keystores)) + var err error + + for i := 0; i < len(keystores); i++ { + var privKeyBytes []byte + var pubKeyBytes []byte + privKeyBytes, pubKeyBytes, _, err = km.attemptDecryptKeystore(decryptor, keystores[i], passwords[i]) + if err != nil { + statuses[i] = &ImportedKeystoreStatus{ + Status: ImportedKeystoreStatus_ERROR, + Message: err.Error(), + } + continue + } + // if key exists prior to being added then output log that duplicate key was found + if _, ok := keys[string(pubKeyBytes)]; ok { + log.Warn(fmt.Sprintf("Duplicate key in import will be ignored: %#x", pubKeyBytes)) + statuses[i] = &ImportedKeystoreStatus{ + Status: ImportedKeystoreStatus_DUPLICATE, + } + continue + } + keys[string(pubKeyBytes)] = string(privKeyBytes) + statuses[i] = &ImportedKeystoreStatus{ + Status: ImportedKeystoreStatus_IMPORTED, + } + } + privKeys := make([][]byte, 0) + pubKeys := make([][]byte, 0) + for pubKey, privKey := range keys { + pubKeys = append(pubKeys, []byte(pubKey)) + privKeys = append(privKeys, []byte(privKey)) + } + + // Write the accounts to disk into a single keystore. + accountsKeystore, err := km.CreateAccountsKeystore(ctx, privKeys, pubKeys) + if err != nil { + return nil, err + } + encodedAccounts, err := json.MarshalIndent(accountsKeystore, "", "\t") + if err != nil { + return nil, err + } + if err := km.wallet.WriteFile(ctx, AccountsKeystoreFileName, encodedAccounts); err != nil { + return nil, err + } + return statuses, nil +} + +// ImportKeypairs directly into the keyManager. +func (km *KeyManager) ImportKeypairs(ctx context.Context, privKeys, pubKeys [][]byte) error { + // Write the accounts to disk into a single keystore. + accountsKeystore, err := km.CreateAccountsKeystore(ctx, privKeys, pubKeys) + if err != nil { + return errors.Wrap(err, "could not import account keypairs") + } + encodedAccounts, err := json.MarshalIndent(accountsKeystore, "", "\t") + if err != nil { + return errors.Wrap(err, "could not marshal accounts keystore into JSON") + } + return km.wallet.WriteFile(ctx, AccountsKeystoreFileName, encodedAccounts) +} + +// CreateAccountsKeystore creates a new keystore holding the provided keys. +func (km *KeyManager) CreateAccountsKeystore( + _ context.Context, + privateKeys, publicKeys [][]byte, +) (*AccountsKeystoreRepresentation, error) { + encryptor := keystorev4.New() + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + if len(privateKeys) != len(publicKeys) { + return nil, fmt.Errorf( + "number of private keys and public keys is not equal: %d != %d", len(privateKeys), len(publicKeys), + ) + } + if km.accountsStore == nil { + km.accountsStore = &AccountStore{ + PrivateKeys: privateKeys, + PublicKeys: publicKeys, + } + } else { + existingPubKeys := make(map[string]bool) + existingPrivKeys := make(map[string]bool) + for i := 0; i < len(km.accountsStore.PrivateKeys); i++ { + existingPrivKeys[string(km.accountsStore.PrivateKeys[i])] = true + existingPubKeys[string(km.accountsStore.PublicKeys[i])] = true + } + // We append to the accounts store keys only + // if the private/secret key do not already exist, to prevent duplicates. 
+ for i := 0; i < len(privateKeys); i++ { + sk := privateKeys[i] + pk := publicKeys[i] + _, privKeyExists := existingPrivKeys[string(sk)] + _, pubKeyExists := existingPubKeys[string(pk)] + if privKeyExists || pubKeyExists { + continue + } + km.accountsStore.PublicKeys = append(km.accountsStore.PublicKeys, pk) + km.accountsStore.PrivateKeys = append(km.accountsStore.PrivateKeys, sk) + } + } + err = km.initializeKeysCachesFromKeystore() + if err != nil { + return nil, errors.Wrap(err, "failed to initialize keys caches") + } + encodedStore, err := json.MarshalIndent(km.accountsStore, "", "\t") + if err != nil { + return nil, err + } + cryptoFields, err := encryptor.Encrypt(encodedStore, km.wallet.walletPassword) + if err != nil { + return nil, errors.Wrap(err, "could not encrypt accounts") + } + return &AccountsKeystoreRepresentation{ + Crypto: cryptoFields, + ID: id.String(), + Version: encryptor.Version(), + Name: encryptor.Name(), + }, nil +} + +// Retrieves the private key and public key from an EIP-2335 keystore file +// by decrypting using a specified password. If the password fails, +// it prompts the user for the correct password until it confirms. +func (_ *KeyManager) attemptDecryptKeystore( + enc *keystorev4.Encryptor, keystore *Keystore, password string, +) ([]byte, []byte, string, error) { + // Attempt to decrypt the keystore with the specifies password. + var privKeyBytes []byte + var err error + privKeyBytes, err = enc.Decrypt(keystore.Crypto, password) + doesNotDecrypt := err != nil && strings.Contains(err.Error(), IncorrectPasswordErrMsg) + if doesNotDecrypt { + return nil, nil, "", fmt.Errorf( + "incorrect password for key 0x%s", + keystore.Pubkey, + ) + } + if err != nil && !strings.Contains(err.Error(), IncorrectPasswordErrMsg) { + return nil, nil, "", errors.Wrap(err, "could not decrypt keystore") + } + var pubKeyBytes []byte + // Attempt to use the pubkey present in the keystore itself as a field. If unavailable, + // then utilize the public key directly from the private key. + if keystore.Pubkey != "" { + pubKeyBytes, err = hex.DecodeString(keystore.Pubkey) + if err != nil { + return nil, nil, "", errors.Wrap(err, "could not decode pubkey from keystore") + } + } else { + privKey, err := bls.SecretKeyFromBytes(privKeyBytes) + if err != nil { + return nil, nil, "", errors.Wrap(err, "could not initialize private key from bytes") + } + pubKeyBytes = privKey.PublicKey().Marshal() + } + return privKeyBytes, pubKeyBytes, password, nil +} diff --git a/accounts/bls/keystore.go b/accounts/bls/keystore.go new file mode 100644 index 0000000000..eafd3bd4eb --- /dev/null +++ b/accounts/bls/keystore.go @@ -0,0 +1,69 @@ +package bls + +import ( + "encoding/json" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" +) + +// Keystore json file representation as a Go struct. +type Keystore struct { + Crypto map[string]interface{} `json:"crypto"` + ID string `json:"uuid"` + Pubkey string `json:"pubkey"` + Version uint `json:"version"` + Name string `json:"name"` + Path string `json:"path"` +} + +// Defines a struct containing 1-to-1 corresponding +// private keys and public keys for Ethereum validators. 
+type AccountStore struct { + PrivateKeys [][]byte `json:"private_keys"` + PublicKeys [][]byte `json:"public_keys"` +} + +// Copy creates a deep copy of accountStore +func (a *AccountStore) Copy() *AccountStore { + storeCopy := &AccountStore{} + storeCopy.PrivateKeys = common.Copy2dBytes(a.PrivateKeys) + storeCopy.PublicKeys = common.Copy2dBytes(a.PublicKeys) + return storeCopy +} + +// AccountsKeystoreRepresentation defines an internal Prysm representation +// of validator accounts, encrypted according to the EIP-2334 standard. +type AccountsKeystoreRepresentation struct { + Crypto map[string]interface{} `json:"crypto"` + ID string `json:"uuid"` + Version uint `json:"version"` + Name string `json:"name"` +} + +// CreateAccountsKeystoreRepresentation is a pure function that takes an accountStore and wallet password and returns the encrypted formatted json version for local writing. +func CreateAccountsKeystoreRepresentation( + store *AccountStore, + walletPW string, +) (*AccountsKeystoreRepresentation, error) { + encryptor := keystorev4.New() + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + encodedStore, err := json.MarshalIndent(store, "", "\t") + if err != nil { + return nil, err + } + cryptoFields, err := encryptor.Encrypt(encodedStore, walletPW) + if err != nil { + return nil, errors.Wrap(err, "could not encrypt accounts") + } + return &AccountsKeystoreRepresentation{ + Crypto: cryptoFields, + ID: id.String(), + Version: encryptor.Version(), + Name: encryptor.Name(), + }, nil +} diff --git a/accounts/bls/wallet.go b/accounts/bls/wallet.go new file mode 100644 index 0000000000..17efc20809 --- /dev/null +++ b/accounts/bls/wallet.go @@ -0,0 +1,99 @@ +package bls + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/log" + "github.com/pkg/errors" +) + +// AccountsKeystoreFileName exposes the name of the keystore file. +const AccountsKeystoreFileName = "all-accounts.keystore.json" + +type Wallet struct { + walletDir string + walletPassword string +} + +func New(walletDir, passwordPath string) (*Wallet, error) { + dirExists, err := HasDir(walletDir) + if err != nil { + return nil, err + } + if !dirExists { + return nil, fmt.Errorf("bls wallet dir does not exists, path: %s", walletDir) + } + + password, err := ioutil.ReadFile(passwordPath) + if err != nil { + return nil, err + } + + return &Wallet{walletDir: walletDir, walletPassword: string(password)}, nil +} + +// SaveWallet persists the wallet's directories to disk. 
+func (w *Wallet) SaveWallet() error { + if err := os.MkdirAll(w.walletDir, 0700); err != nil { + return errors.Wrap(err, "could not create wallet directory") + } + return nil +} + +func (w *Wallet) ReadFile(ctx context.Context, filename string) ([]byte, error) { + existDir, err := HasDir(w.walletDir) + if err != nil { + return nil, err + } + if !existDir { + if err = w.SaveWallet(); err != nil { + return nil, err + } + } + fullPath := filepath.Join(w.walletDir, filename) + matches, err := filepath.Glob(fullPath) + if err != nil { + return []byte{}, errors.Wrap(err, "could not find file") + } + if len(matches) == 0 { + return []byte{}, fmt.Errorf("no files found in path: %s", fullPath) + } + rawData, err := os.ReadFile(matches[0]) + if err != nil { + return nil, errors.Wrapf(err, "could not read path: %s", fullPath) + } + return rawData, nil +} + +func (w *Wallet) WriteFile(ctx context.Context, filename string, data []byte) error { + existDir, err := HasDir(w.walletDir) + if err != nil { + return err + } + if !existDir { + if err = w.SaveWallet(); err != nil { + return err + } + } + fullPath := filepath.Join(w.walletDir, filename) + if err := os.WriteFile(fullPath, data, 0700); err != nil { + return errors.Wrapf(err, "could not write %s", fullPath) + } + log.Debug("Wrote new file at path", "path", fullPath, "filename", filename) + return nil +} + +func HasDir(path string) (bool, error) { + info, err := os.Stat(path) + if os.IsNotExist(err) { + return false, nil + } + if info == nil { + return false, err + } + return info.IsDir(), err +} diff --git a/build/ci.go b/build/ci.go index c1b5f706e4..e6b24754d8 100644 --- a/build/ci.go +++ b/build/ci.go @@ -202,6 +202,7 @@ func doInstall(cmdline []string) { dlgo = flag.Bool("dlgo", false, "Download Go and build with it") arch = flag.String("arch", "", "Architecture to cross build for") cc = flag.String("cc", "", "C compiler to cross build with") + race = flag.Bool("race", false, "Execute the race detector") ) flag.CommandLine.Parse(cmdline) @@ -216,6 +217,10 @@ func doInstall(cmdline []string) { env := build.Env() gobuild := tc.Go("build", buildFlags(env)...) + if *race { + gobuild.Args = append(gobuild.Args, "-race") + } + // arm64 CI builders are memory-constrained and can't handle concurrent builds, // better disable it. This check isn't the best, it should probably // check for something in env instead. 
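The accounts/bls additions above give the node a wallet-backed BLS key manager: Wallet owns the on-disk keystore directory and password file, while KeyManager caches the decrypted public/secret keys and signs 32-byte roots. The following is a minimal usage sketch, not part of the patch; the paths bls_keystore and bls_password are hypothetical, the keystore is assumed to have been populated beforehand (for example via ronin account importbls or generatebls), and it assumes the crypto/bls Signature returned by Sign exposes Marshal() like the key types do.

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/bls"
)

func main() {
	// Open the wallet: the directory holds all-accounts.keystore.json and the
	// password file holds the wallet password (both paths are hypothetical).
	wallet, err := bls.New("bls_keystore", "bls_password")
	if err != nil {
		panic(err)
	}
	// Build the key manager; this decrypts the keystore and fills the key caches.
	km, err := bls.NewKeyManager(context.Background(), wallet)
	if err != nil {
		panic(err)
	}
	pubKeys, err := km.FetchValidatingPublicKeys(context.Background())
	if err != nil {
		panic(err)
	}
	for i, pub := range pubKeys {
		fmt.Printf("BLS public key #%d: %x\n", i, pub)
	}
	if len(pubKeys) == 0 {
		return
	}
	// Sign an arbitrary 32-byte root with the first cached key.
	sig, err := km.Sign(context.Background(), &bls.SignRequest{
		PublicKey:   pubKeys[0][:],
		SigningRoot: make([]byte, 32),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %x\n", sig.Marshal())
}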
diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index 85f28b8cb1..ac3a29fe60 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "time" "github.com/ethereum/go-ethereum/accounts/keystore" @@ -151,10 +152,11 @@ func dnsSign(ctx *cli.Context) error { return fmt.Errorf("need tree definition directory and key file as arguments") } var ( - defdir = ctx.Args().Get(0) - keyfile = ctx.Args().Get(1) - def = loadTreeDefinition(defdir) - domain = directoryName(defdir) + defdir = ctx.Args().Get(0) + keyfile = ctx.Args().Get(1) + def = loadTreeDefinition(defdir) + domain = directoryName(defdir) + passwordfile string ) if def.Meta.URL != "" { d, _, err := dnsdisc.ParseURL(def.Meta.URL) @@ -176,7 +178,10 @@ func dnsSign(ctx *cli.Context) error { return err } - key := loadSigningKey(keyfile) + if ctx.NArg() == 3 { + passwordfile = ctx.Args().Get(2) + } + key := loadSigningKey(keyfile, passwordfile) url, err := t.Sign(key, domain) if err != nil { return fmt.Errorf("can't sign: %v", err) @@ -252,12 +257,23 @@ func dnsNukeRoute53(ctx *cli.Context) error { } // loadSigningKey loads a private key in Ethereum keystore format. -func loadSigningKey(keyfile string) *ecdsa.PrivateKey { +func loadSigningKey(keyfile, passwordfile string) *ecdsa.PrivateKey { keyjson, err := ioutil.ReadFile(keyfile) if err != nil { exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err)) } - password, _ := prompt.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") + + var password string + if passwordfile != "" { + rawPassword, err := ioutil.ReadFile(passwordfile) + if err != nil { + exit(fmt.Errorf("failed to read the passwordfile at '%s': %v", passwordfile, err)) + } + password = strings.TrimRight(string(rawPassword), "\r\n") + } else { + password, _ = prompt.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") + } + key, err := keystore.DecryptKey(keyjson, password) if err != nil { exit(fmt.Errorf("error decrypting key: %v", err)) diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index d65d6314c8..dc559c1bfe 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -136,6 +136,7 @@ type nodeFilterC struct { var filterFlags = map[string]nodeFilterC{ "-limit": {1, trueFilter}, // needed to skip over -limit "-ip": {1, ipFilter}, + "-ip-list": {1, ipListFilter}, "-min-age": {1, minAgeFilter}, "-eth-network": {1, ethFilter}, "-les-server": {0, lesFilter}, @@ -212,6 +213,25 @@ func ipFilter(args []string) (nodeFilter, error) { return f, nil } +func ipListFilter(args []string) (nodeFilter, error) { + rawIpList := strings.Split(args[0], ",") + var ipList []net.IP + for _, rawIp := range rawIpList { + ip := net.ParseIP(rawIp) + ipList = append(ipList, ip) + } + + f := func(n nodeJSON) bool { + for _, ip := range ipList { + if ip.Equal(n.N.IP()) { + return true + } + } + return false + } + return f, nil +} + func minAgeFilter(args []string) (nodeFilter, error) { minage, err := time.ParseDuration(args[0]) if err != nil { @@ -237,6 +257,10 @@ func ethFilter(args []string) (nodeFilter, error) { filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash) case "sepolia": filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) + case "ronin-mainnet": + filter = forkid.NewStaticFilter(params.RoninMainnetChainConfig, params.RoninMainnetGenesisHash) + case "ronin-testnet": + filter = 
forkid.NewStaticFilter(params.RoninTestnetChainConfig, params.RoninTestnetGenesisHash) default: return nil, fmt.Errorf("unknown network %q", args[0]) } diff --git a/cmd/ronin/accountcmd.go b/cmd/ronin/accountcmd.go index 648f254bdf..0787043d78 100644 --- a/cmd/ronin/accountcmd.go +++ b/cmd/ronin/accountcmd.go @@ -17,13 +17,19 @@ package main import ( + "context" + "encoding/hex" "fmt" "io/ioutil" + "strings" "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/accounts/bls" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" ) @@ -204,6 +210,56 @@ Check if the account corresponding to the private key exists in keystore. The keyfile is assumed to contain an unencrypted private key in hexadecimal format. `, }, + { + Name: "listbls", + Usage: "Print information of BLS account", + Action: utils.MigrateFlags(blsAccountList), + Flags: []cli.Flag{ + utils.BlsWalletPath, + utils.BlsPasswordPath, + cli.BoolFlag{ + Name: "secret", + Usage: "include the secret key in the output", + }, + }, + Description: `ronin account listbls [--secret]`, + }, + { + Name: "importbls", + Usage: "Import the BLS secret key", + Action: utils.MigrateFlags(blsAccountImport), + Flags: []cli.Flag{ + utils.BlsWalletPath, + utils.BlsPasswordPath, + }, + ArgsUsage: "", + Description: `ronin account importbls `, + }, + { + Name: "checkbls", + Usage: "Check if the BLS account corresponding to secret key exists", + Action: utils.MigrateFlags(blsAccountCheck), + Flags: []cli.Flag{ + utils.BlsWalletPath, + utils.BlsPasswordPath, + }, + ArgsUsage: "", + Description: `ronin account checkbls `, + }, + { + Name: "generatebls", + Usage: "Generate BLS secret key", + Action: utils.MigrateFlags(blsAccountGenerate), + Flags: []cli.Flag{ + utils.BlsWalletPath, + utils.BlsPasswordPath, + cli.BoolFlag{ + Name: "secret", + Usage: "include the secret key in the output", + }, + }, + Description: `ronin account generatebls [--secret]`, + }, }, } ) @@ -398,3 +454,176 @@ func accountCheck(ctx *cli.Context) error { utils.Fatalf("Account %x not found", address) return nil } + +func loadKeyManager(ctx *cli.Context) (*bls.KeyManager, []blsCommon.PublicKey, error) { + blsPasswordPath := ctx.GlobalString(utils.BlsPasswordPath.Name) + blsWalletPath := ctx.GlobalString(utils.BlsWalletPath.Name) + + wallet, err := bls.New(blsWalletPath, blsPasswordPath) + if err != nil { + return nil, nil, fmt.Errorf("failed to create wallet, err %s", err) + } + + km, err := bls.NewKeyManager(context.Background(), wallet) + if err != nil { + return nil, nil, fmt.Errorf("failed to initialized key manager, err %s", err) + } + + rawPublicKeys, err := km.FetchValidatingPublicKeys(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("failed to fetch BLS public key, err %s", err) + } + + var publicKeys []blsCommon.PublicKey + for _, rawPublicKey := range rawPublicKeys { + publicKey, err := blst.PublicKeyFromBytes(rawPublicKey[:]) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode BLS public key, err %s", err) + } + + publicKeys = append(publicKeys, publicKey) + } + + return km, publicKeys, nil +} + +func loadBlsSecretKey(ctx *cli.Context) (blsCommon.SecretKey, error) { + keyfile := ctx.Args().First() + if len(keyfile) == 0 { + utils.Fatalf("keyfile must be given as 
argument") + } + + secretKeyHex, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, fmt.Errorf("failed to read secret key, err %s", err) + } + secretKeyHexString := strings.TrimSuffix(string(secretKeyHex), "\n") + + key, err := hex.DecodeString(secretKeyHexString) + if err != nil { + return nil, fmt.Errorf("failed to decode secret key, err %s", err) + } + + secretKey, err := blst.SecretKeyFromBytes(key) + if err != nil { + return nil, fmt.Errorf("failed to decode secret key, err %s", err) + } + + return secretKey, nil +} + +func blsAccountList(ctx *cli.Context) error { + km, publicKeys, err := loadKeyManager(ctx) + if err != nil { + utils.Fatalf("Failed to fetch BLS public key, err %s", err) + } + + if ctx.Bool("secret") { + rawSecretKeys, err := km.FetchValidatingSecretKeys(context.Background()) + if err != nil { + utils.Fatalf("Failed to fetch BLS secret key, err %s", err) + } + + for i, rawsecretKey := range rawSecretKeys { + secretKey, err := blst.SecretKeyFromBytes(rawsecretKey[:]) + if err != nil { + utils.Fatalf("Failed to decode BLS secret key, err %s", err) + } + + fmt.Printf("BLS public key #%d: {%x}\n", i, secretKey.PublicKey().Marshal()) + fmt.Printf("BLS secret key #%d: {%x}\n", i, secretKey.Marshal()) + fmt.Println() + } + + } else { + for i, publicKey := range publicKeys { + fmt.Printf("BLS public key #%d: {%x}\n", i, publicKey.Marshal()) + } + } + + return nil +} + +func blsAccountImport(ctx *cli.Context) error { + secretKey, err := loadBlsSecretKey(ctx) + if err != nil { + utils.Fatalf("Failed to load BLS secret key, err %s", err) + } + + publicKey := secretKey.PublicKey() + + km, publicKeys, err := loadKeyManager(ctx) + if err != nil { + utils.Fatalf("Failed to load BLS public key, err %s", err) + } + + for _, pkey := range publicKeys { + if pkey.Equals(publicKey) { + utils.Fatalf("Account already existed, public key: {%x}", publicKey.Marshal()) + } + } + + err = km.ImportKeypairs( + context.Background(), + [][]byte{secretKey.Marshal()}, + [][]byte{publicKey.Marshal()}, + ) + if err != nil { + utils.Fatalf("Failed to import secret key, err %s", err) + } + + return nil +} + +func blsAccountCheck(ctx *cli.Context) error { + secretKey, err := loadBlsSecretKey(ctx) + if err != nil { + utils.Fatalf("Failed to load BLS secret key, err %s", err) + } + publicKey := secretKey.PublicKey() + + _, publicKeys, err := loadKeyManager(ctx) + if err != nil { + utils.Fatalf("Failed to load BLS public key, err %s", err) + } + + for _, pkey := range publicKeys { + if pkey.Equals(publicKey) { + fmt.Printf("Found BLS account %x\n", publicKey.Marshal()) + return nil + } + } + + utils.Fatalf("BLS account %x not found", publicKey.Marshal()) + return nil +} + +func blsAccountGenerate(ctx *cli.Context) error { + secretKey, err := blst.RandKey() + if err != nil { + utils.Fatalf("Failed to generate secret key, err %s", err) + } + + km, _, err := loadKeyManager(ctx) + if err != nil { + utils.Fatalf("Failed to load BLS public key, err %s", err) + } + + err = km.ImportKeypairs( + context.Background(), + [][]byte{secretKey.Marshal()}, + [][]byte{secretKey.PublicKey().Marshal()}, + ) + if err != nil { + utils.Fatalf("Failed to import generated BLS key, err %s", err) + } + + fmt.Println("Successfully generated BLS key") + fmt.Printf("Public key: {%x}\n", secretKey.PublicKey().Marshal()) + + if ctx.Bool("secret") { + fmt.Printf("Secret key: {%x}\n", secretKey.Marshal()) + } + + return nil +} diff --git a/cmd/ronin/main.go b/cmd/ronin/main.go index 1af7c41823..06bb2f0794 100644 --- 
a/cmd/ronin/main.go +++ b/cmd/ronin/main.go @@ -167,6 +167,12 @@ var ( utils.CatalystFlag, utils.MonitorDoubleSign, utils.StoreInternalTransactions, + utils.MaxCurVoteAmountPerBlock, + utils.EnableFastFinality, + utils.EnableFastFinalitySign, + utils.BlsPasswordPath, + utils.BlsWalletPath, + utils.DisableRoninProtocol, } rpcFlags = []cli.Flag{ diff --git a/cmd/ronin/usage.go b/cmd/ronin/usage.go index d2795081e2..682398f4bf 100644 --- a/cmd/ronin/usage.go +++ b/cmd/ronin/usage.go @@ -57,6 +57,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.ForceOverrideChainConfigFlag, utils.MonitorDoubleSign, utils.StoreInternalTransactions, + utils.DisableRoninProtocol, }, }, { @@ -251,6 +252,16 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.CatalystFlag, }, }, + { + Name: "FAST FINALITY", + Flags: []cli.Flag{ + utils.MaxCurVoteAmountPerBlock, + utils.EnableFastFinality, + utils.EnableFastFinalitySign, + utils.BlsPasswordPath, + utils.BlsWalletPath, + }, + }, } func init() { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 438a3c9678..b8e13f2afb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -871,6 +871,39 @@ var ( Usage: "Pyroscope server address", Value: "http://localhost:4040", } + + MaxCurVoteAmountPerBlock = cli.IntFlag{ + Name: "votepool.maxcurvoteperblock", + Usage: "The maximum finality vote per current block", + Value: 22, + } + + EnableFastFinality = cli.BoolFlag{ + Name: "finality.enable", + Usage: "Enable fast finality vote", + } + + EnableFastFinalitySign = cli.BoolFlag{ + Name: "finality.enablesign", + Usage: "Enable fast finality vote signing", + } + + BlsPasswordPath = cli.StringFlag{ + Name: "finality.blspasswordpath", + Usage: "The path to bls wallet password file", + Value: "bls_password", + } + + BlsWalletPath = cli.StringFlag{ + Name: "finality.blswalletpath", + Usage: "The path to bls wallet secret key", + Value: "bls_keystore", + } + + DisableRoninProtocol = cli.BoolFlag{ + Name: "ronin.disable", + Usage: "Disable ronin p2p protocol", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1302,6 +1335,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { setNodeUserIdent(ctx, cfg) setDataDir(ctx, cfg) setSmartCard(ctx, cfg) + setFastFinality(ctx, cfg) if ctx.GlobalIsSet(ExternalSignerFlag.Name) { cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name) @@ -1330,6 +1364,14 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { } } +func setFastFinality(ctx *cli.Context, cfg *node.Config) { + cfg.MaxCurVoteAmountPerBlock = ctx.GlobalInt(MaxCurVoteAmountPerBlock.Name) + cfg.EnableFastFinality = ctx.GlobalBool(EnableFastFinality.Name) + cfg.EnableFastFinalitySign = ctx.GlobalBool(EnableFastFinalitySign.Name) + cfg.BlsPasswordPath = ctx.GlobalString(BlsPasswordPath.Name) + cfg.BlsWalletPath = ctx.GlobalString(BlsWalletPath.Name) +} + func setSmartCard(ctx *cli.Context, cfg *node.Config) { // Skip enabling smartcards if no path is set path := ctx.GlobalString(SmartCardDaemonPathFlag.Name) @@ -1698,6 +1740,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.EthDiscoveryURLs = SplitAndTrim(urls) } } + if ctx.GlobalIsSet(DisableRoninProtocol.Name) { + cfg.DisableRoninProtocol = ctx.GlobalBool(DisableRoninProtocol.Name) + } // Override any default configs for hard coded networks. 
switch { case ctx.GlobalBool(MainnetFlag.Name): diff --git a/common/bytes.go b/common/bytes.go index 7827bb572e..c7006f49c3 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -116,6 +116,15 @@ func LeftPadBytes(slice []byte, l int) []byte { return padded } +// PadTo pads a byte slice to the given size. If the byte slice is larger than the given size, the +// original slice is returned. +func PadTo(b []byte, size int) []byte { + if len(b) >= size { + return b + } + return append(b, make([]byte, size-len(b))...) +} + // TrimLeftZeroes returns a subslice of s without leading zeroes func TrimLeftZeroes(s []byte) []byte { idx := 0 @@ -137,3 +146,15 @@ func TrimRightZeroes(s []byte) []byte { } return s[:idx] } + +// Copy2dBytes will copy and return a non-nil 2d byte slice, otherwise it returns nil. +func Copy2dBytes(ary [][]byte) [][]byte { + if ary != nil { + copied := make([][]byte, len(ary)) + for i, a := range ary { + copied[i] = CopyBytes(a) + } + return copied + } + return nil +} diff --git a/common/bytes_go120.go b/common/bytes_go120.go new file mode 100644 index 0000000000..e12b09c7c4 --- /dev/null +++ b/common/bytes_go120.go @@ -0,0 +1,48 @@ +//go:build go1.20 +// +build go1.20 + +package common + +// These methods use go1.20 syntax to convert a byte slice to a fixed size array. + +// ToBytes4 is a convenience method for converting a byte slice to a fix +// sized 4 byte array. This method will truncate the input if it is larger +// than 4 bytes. +func ToBytes4(x []byte) [4]byte { + return [4]byte(PadTo(x, 4)) +} + +// ToBytes20 is a convenience method for converting a byte slice to a fix +// sized 20 byte array. This method will truncate the input if it is larger +// than 20 bytes. +func ToBytes20(x []byte) [20]byte { + return [20]byte(PadTo(x, 20)) +} + +// ToBytes32 is a convenience method for converting a byte slice to a fix +// sized 32 byte array. This method will truncate the input if it is larger +// than 32 bytes. +func ToBytes32(x []byte) [32]byte { + return [32]byte(PadTo(x, 32)) +} + +// ToBytes48 is a convenience method for converting a byte slice to a fix +// sized 48 byte array. This method will truncate the input if it is larger +// than 48 bytes. +func ToBytes48(x []byte) [48]byte { + return [48]byte(PadTo(x, 48)) +} + +// ToBytes64 is a convenience method for converting a byte slice to a fix +// sized 64 byte array. This method will truncate the input if it is larger +// than 64 bytes. +func ToBytes64(x []byte) [64]byte { + return [64]byte(PadTo(x, 64)) +} + +// ToBytes96 is a convenience method for converting a byte slice to a fix +// sized 96 byte array. This method will truncate the input if it is larger +// than 96 bytes. 
+func ToBytes96(x []byte) [96]byte { + return [96]byte(PadTo(x, 96)) +} diff --git a/consensus/consensus.go b/consensus/consensus.go index 56d5c573cb..bb7e095c45 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -18,9 +18,11 @@ package consensus import ( + "math/big" + + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" - "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" @@ -147,3 +149,25 @@ type PoSA interface { IsSystemTransaction(tx *types.Transaction, header *types.Header) (bool, error) IsSystemContract(to *common.Address) bool } + +type FastFinalityPoSA interface { + PoSA + + GetJustifiedBlock(chain ChainHeaderReader, blockNumber uint64, blockHash common.Hash) (uint64, common.Hash) + GetFinalizedBlock(chain ChainHeaderReader, blockNumber uint64, blockHash common.Hash) (uint64, common.Hash) + + // IsActiveValidatorAt always returns false before Shillin + IsActiveValidatorAt(chain ChainHeaderReader, header *types.Header) bool + + // VerifyVote check if the finality voter is in the validator set, it assumes the signature is + // already verified + VerifyVote(chain ChainHeaderReader, vote *types.VoteEnvelope) error + + SetVotePool(votePool VotePool) + + GetActiveValidatorAt(chain ChainHeaderReader, blockNumber uint64, blockHash common.Hash) []finality.ValidatorWithBlsPub +} + +type VotePool interface { + FetchVoteByBlockHash(blockHash common.Hash) []*types.VoteEnvelope +} diff --git a/consensus/consortium/common/constants.go b/consensus/consortium/common/constants.go index 4106219042..84da1c44f5 100644 --- a/consensus/consortium/common/constants.go +++ b/consensus/consortium/common/constants.go @@ -2,11 +2,13 @@ package common import ( "errors" + "github.com/ethereum/go-ethereum/crypto" ) const ( - ExtraSeal = crypto.SignatureLength // Fixed number of extra-data suffix bytes reserved for signer seal + ExtraSeal = crypto.SignatureLength + ExtraVanity = 32 ) var ( diff --git a/consensus/consortium/common/contract.go b/consensus/consortium/common/contract.go index 2b0bc120d8..8d5733aebe 100644 --- a/consensus/consortium/common/contract.go +++ b/consensus/consortium/common/contract.go @@ -15,11 +15,15 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus" + finalityTracking "github.com/ethereum/go-ethereum/consensus/consortium/generated_contracts/finality_tracking" + "github.com/ethereum/go-ethereum/consensus/consortium/generated_contracts/profile" roninValidatorSet "github.com/ethereum/go-ethereum/consensus/consortium/generated_contracts/ronin_validator_set" slashIndicator "github.com/ethereum/go-ethereum/consensus/consortium/generated_contracts/slash_indicator" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" chainParams "github.com/ethereum/go-ethereum/params" @@ -50,12 +54,23 @@ func getTransactionOpts(from common.Address, nonce uint64, chainId *big.Int, sig } } +type ContractInteraction interface { + GetValidators(blockNumber *big.Int) ([]common.Address, error) + WrapUpEpoch(opts *ApplyTransactOpts) error + SubmitBlockReward(opts *ApplyTransactOpts) 
error + Slash(opts *ApplyTransactOpts, spoiledValidator common.Address) error + FinalityReward(opts *ApplyTransactOpts, votedValidators []common.Address) error + GetBlsPublicKey(blockNumber *big.Int, validator common.Address) (blsCommon.PublicKey, error) +} + // ContractIntegrator is a contract facing to interact with smart contract that supports DPoS type ContractIntegrator struct { chainId *big.Int signer types.Signer roninValidatorSetSC *roninValidatorSet.RoninValidatorSet slashIndicatorSC *slashIndicator.SlashIndicator + profileSC *profile.Profile + finalityTrackingSC *finalityTracking.FinalityTracking signTxFn SignerTxFn coinbase common.Address } @@ -74,10 +89,24 @@ func NewContractIntegrator(config *chainParams.ChainConfig, backend bind.Contrac return nil, err } + // Create Profile contract instance + profileSC, err := profile.NewProfile(config.ConsortiumV2Contracts.ProfileContract, backend) + if err != nil { + return nil, err + } + + // Create Finality Tracking contract instance + finalityTrackingSC, err := finalityTracking.NewFinalityTracking(config.ConsortiumV2Contracts.FinalityTracking, backend) + if err != nil { + return nil, err + } + return &ContractIntegrator{ chainId: config.ChainID, roninValidatorSetSC: roninValidatorSetSC, slashIndicatorSC: slashIndicatorSC, + profileSC: profileSC, + finalityTrackingSC: finalityTrackingSC, signTxFn: signTxFn, signer: types.LatestSignerForChainID(config.ChainID), coinbase: coinbase, @@ -192,6 +221,50 @@ func (c *ContractIntegrator) Slash(opts *ApplyTransactOpts, spoiledValidator com return nil } +func (c *ContractIntegrator) FinalityReward(opts *ApplyTransactOpts, votedValidators []common.Address) error { + nonce := opts.State.GetNonce(c.coinbase) + tx, err := c.finalityTrackingSC.RecordFinality(getTransactionOpts(c.coinbase, nonce, c.chainId, c.signTxFn), votedValidators) + if err != nil { + return err + } + + msg := types.NewMessage( + opts.Header.Coinbase, + tx.To(), + opts.State.GetNonce(opts.Header.Coinbase), + tx.Value(), + tx.Gas(), + big.NewInt(0), + big.NewInt(0), + big.NewInt(0), + tx.Data(), + tx.AccessList(), + false, + ) + + if err = ApplyTransaction(msg, opts); err != nil { + return err + } + + return nil +} + +func (c *ContractIntegrator) GetBlsPublicKey(blockNumber *big.Int, validator common.Address) (blsCommon.PublicKey, error) { + callOpts := bind.CallOpts{ + BlockNumber: blockNumber, + } + validatorProfile, err := c.profileSC.GetId2Profile(&callOpts, validator) + if err != nil { + return nil, err + } + blsPublicKey, err := blst.PublicKeyFromBytes(validatorProfile.Pubkey) + if err != nil { + return nil, err + } + + return blsPublicKey, nil +} + // ApplyMessageOpts is the collection of options to fine tune a contract call request. 
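The new consensus.VotePool interface (declared in the consensus/consensus.go hunk above) is the engine's only view of the finality vote store: a FastFinalityPoSA engine receives an implementation through SetVotePool and later pulls votes by block hash when tallying finality. As a rough illustration only, with memVotePool being a hypothetical helper that is not part of the patch, a trivial in-memory implementation suitable for tests could look like this:

package votepooltest // hypothetical package for the sketch

import (
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// memVotePool is a toy consensus.VotePool backed by a map; illustrative only.
type memVotePool struct {
	mu    sync.RWMutex
	votes map[common.Hash][]*types.VoteEnvelope
}

func newMemVotePool() *memVotePool {
	return &memVotePool{votes: make(map[common.Hash][]*types.VoteEnvelope)}
}

// add records a finality vote for the given block hash.
func (p *memVotePool) add(blockHash common.Hash, vote *types.VoteEnvelope) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.votes[blockHash] = append(p.votes[blockHash], vote)
}

// FetchVoteByBlockHash satisfies consensus.VotePool.
func (p *memVotePool) FetchVoteByBlockHash(blockHash common.Hash) []*types.VoteEnvelope {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.votes[blockHash]
}

Wiring it in would then be a single call on the engine, for example engine.SetVotePool(newMemVotePool()).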
type ApplyMessageOpts struct { State *state.StateDB diff --git a/consensus/consortium/common/types.go b/consensus/consortium/common/types.go index d497d924f1..36a224f81d 100644 --- a/consensus/consortium/common/types.go +++ b/consensus/consortium/common/types.go @@ -1,11 +1,12 @@ package common import ( + "math/big" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/types" - "math/big" ) // SignerFn is a signer callback function to request the wallet to sign the hash of the given data diff --git a/consensus/consortium/generated_contracts/finality_tracking/finality_tracking.go b/consensus/consortium/generated_contracts/finality_tracking/finality_tracking.go new file mode 100644 index 0000000000..4c9c54c133 --- /dev/null +++ b/consensus/consortium/generated_contracts/finality_tracking/finality_tracking.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package finalityTracking + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// FinalityTrackingMetaData contains all meta data concerning the FinalityTracking contract. +var FinalityTrackingMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"voters\",\"type\":\"address[]\"}],\"name\":\"recordFinality\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// FinalityTrackingABI is the input ABI used to generate the binding from. +// Deprecated: Use FinalityTrackingMetaData.ABI instead. +var FinalityTrackingABI = FinalityTrackingMetaData.ABI + +// FinalityTracking is an auto generated Go binding around an Ethereum contract. +type FinalityTracking struct { + FinalityTrackingCaller // Read-only binding to the contract + FinalityTrackingTransactor // Write-only binding to the contract + FinalityTrackingFilterer // Log filterer for contract events +} + +// FinalityTrackingCaller is an auto generated read-only Go binding around an Ethereum contract. +type FinalityTrackingCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FinalityTrackingTransactor is an auto generated write-only Go binding around an Ethereum contract. +type FinalityTrackingTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FinalityTrackingFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type FinalityTrackingFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// FinalityTrackingSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type FinalityTrackingSession struct { + Contract *FinalityTracking // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// FinalityTrackingCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type FinalityTrackingCallerSession struct { + Contract *FinalityTrackingCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// FinalityTrackingTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type FinalityTrackingTransactorSession struct { + Contract *FinalityTrackingTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// FinalityTrackingRaw is an auto generated low-level Go binding around an Ethereum contract. +type FinalityTrackingRaw struct { + Contract *FinalityTracking // Generic contract binding to access the raw methods on +} + +// FinalityTrackingCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type FinalityTrackingCallerRaw struct { + Contract *FinalityTrackingCaller // Generic read-only contract binding to access the raw methods on +} + +// FinalityTrackingTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type FinalityTrackingTransactorRaw struct { + Contract *FinalityTrackingTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewFinalityTracking creates a new instance of FinalityTracking, bound to a specific deployed contract. +func NewFinalityTracking(address common.Address, backend bind.ContractBackend) (*FinalityTracking, error) { + contract, err := bindFinalityTracking(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FinalityTracking{FinalityTrackingCaller: FinalityTrackingCaller{contract: contract}, FinalityTrackingTransactor: FinalityTrackingTransactor{contract: contract}, FinalityTrackingFilterer: FinalityTrackingFilterer{contract: contract}}, nil +} + +// NewFinalityTrackingCaller creates a new read-only instance of FinalityTracking, bound to a specific deployed contract. +func NewFinalityTrackingCaller(address common.Address, caller bind.ContractCaller) (*FinalityTrackingCaller, error) { + contract, err := bindFinalityTracking(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FinalityTrackingCaller{contract: contract}, nil +} + +// NewFinalityTrackingTransactor creates a new write-only instance of FinalityTracking, bound to a specific deployed contract. +func NewFinalityTrackingTransactor(address common.Address, transactor bind.ContractTransactor) (*FinalityTrackingTransactor, error) { + contract, err := bindFinalityTracking(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FinalityTrackingTransactor{contract: contract}, nil +} + +// NewFinalityTrackingFilterer creates a new log filterer instance of FinalityTracking, bound to a specific deployed contract. 
+func NewFinalityTrackingFilterer(address common.Address, filterer bind.ContractFilterer) (*FinalityTrackingFilterer, error) { + contract, err := bindFinalityTracking(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FinalityTrackingFilterer{contract: contract}, nil +} + +// bindFinalityTracking binds a generic wrapper to an already deployed contract. +func bindFinalityTracking(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FinalityTrackingMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_FinalityTracking *FinalityTrackingRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FinalityTracking.Contract.FinalityTrackingCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_FinalityTracking *FinalityTrackingRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FinalityTracking.Contract.FinalityTrackingTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_FinalityTracking *FinalityTrackingRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FinalityTracking.Contract.FinalityTrackingTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_FinalityTracking *FinalityTrackingCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FinalityTracking.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_FinalityTracking *FinalityTrackingTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FinalityTracking.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_FinalityTracking *FinalityTrackingTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FinalityTracking.Contract.contract.Transact(opts, method, params...) +} + +// RecordFinality is a paid mutator transaction binding the contract method 0xc245db0f. +// +// Solidity: function recordFinality(address[] voters) returns() +func (_FinalityTracking *FinalityTrackingTransactor) RecordFinality(opts *bind.TransactOpts, voters []common.Address) (*types.Transaction, error) { + return _FinalityTracking.contract.Transact(opts, "recordFinality", voters) +} + +// RecordFinality is a paid mutator transaction binding the contract method 0xc245db0f. 
+// +// Solidity: function recordFinality(address[] voters) returns() +func (_FinalityTracking *FinalityTrackingSession) RecordFinality(voters []common.Address) (*types.Transaction, error) { + return _FinalityTracking.Contract.RecordFinality(&_FinalityTracking.TransactOpts, voters) +} + +// RecordFinality is a paid mutator transaction binding the contract method 0xc245db0f. +// +// Solidity: function recordFinality(address[] voters) returns() +func (_FinalityTracking *FinalityTrackingTransactorSession) RecordFinality(voters []common.Address) (*types.Transaction, error) { + return _FinalityTracking.Contract.RecordFinality(&_FinalityTracking.TransactOpts, voters) +} diff --git a/consensus/consortium/generated_contracts/profile/profile.go b/consensus/consortium/generated_contracts/profile/profile.go new file mode 100644 index 0000000000..649bece071 --- /dev/null +++ b/consensus/consortium/generated_contracts/profile/profile.go @@ -0,0 +1,222 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package profile + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// IProfileCandidateProfile is an auto generated low-level Go binding around an user-defined struct. +type IProfileCandidateProfile struct { + Id common.Address + Consensus common.Address + Admin common.Address + Treasury common.Address + Governor common.Address + Pubkey []byte +} + +// ProfileMetaData contains all meta data concerning the Profile contract. +var ProfileMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"id\",\"type\":\"address\"}],\"name\":\"getId2Profile\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"id\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"consensus\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"addresspayable\",\"name\":\"treasury\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"governor\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"pubkey\",\"type\":\"bytes\"}],\"internalType\":\"structIProfile.CandidateProfile\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +// ProfileABI is the input ABI used to generate the binding from. +// Deprecated: Use ProfileMetaData.ABI instead. +var ProfileABI = ProfileMetaData.ABI + +// Profile is an auto generated Go binding around an Ethereum contract. +type Profile struct { + ProfileCaller // Read-only binding to the contract + ProfileTransactor // Write-only binding to the contract + ProfileFilterer // Log filterer for contract events +} + +// ProfileCaller is an auto generated read-only Go binding around an Ethereum contract. +type ProfileCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ProfileTransactor is an auto generated write-only Go binding around an Ethereum contract. 
+type ProfileTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ProfileFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ProfileFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ProfileSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type ProfileSession struct { + Contract *Profile // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ProfileCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ProfileCallerSession struct { + Contract *ProfileCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ProfileTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ProfileTransactorSession struct { + Contract *ProfileTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ProfileRaw is an auto generated low-level Go binding around an Ethereum contract. +type ProfileRaw struct { + Contract *Profile // Generic contract binding to access the raw methods on +} + +// ProfileCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ProfileCallerRaw struct { + Contract *ProfileCaller // Generic read-only contract binding to access the raw methods on +} + +// ProfileTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ProfileTransactorRaw struct { + Contract *ProfileTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewProfile creates a new instance of Profile, bound to a specific deployed contract. +func NewProfile(address common.Address, backend bind.ContractBackend) (*Profile, error) { + contract, err := bindProfile(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Profile{ProfileCaller: ProfileCaller{contract: contract}, ProfileTransactor: ProfileTransactor{contract: contract}, ProfileFilterer: ProfileFilterer{contract: contract}}, nil +} + +// NewProfileCaller creates a new read-only instance of Profile, bound to a specific deployed contract. +func NewProfileCaller(address common.Address, caller bind.ContractCaller) (*ProfileCaller, error) { + contract, err := bindProfile(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ProfileCaller{contract: contract}, nil +} + +// NewProfileTransactor creates a new write-only instance of Profile, bound to a specific deployed contract. +func NewProfileTransactor(address common.Address, transactor bind.ContractTransactor) (*ProfileTransactor, error) { + contract, err := bindProfile(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ProfileTransactor{contract: contract}, nil +} + +// NewProfileFilterer creates a new log filterer instance of Profile, bound to a specific deployed contract. 
+func NewProfileFilterer(address common.Address, filterer bind.ContractFilterer) (*ProfileFilterer, error) { + contract, err := bindProfile(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ProfileFilterer{contract: contract}, nil +} + +// bindProfile binds a generic wrapper to an already deployed contract. +func bindProfile(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ProfileMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Profile *ProfileRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Profile.Contract.ProfileCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Profile *ProfileRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Profile.Contract.ProfileTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Profile *ProfileRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Profile.Contract.ProfileTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Profile *ProfileCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Profile.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Profile *ProfileTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Profile.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Profile *ProfileTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Profile.Contract.contract.Transact(opts, method, params...) +} + +// GetId2Profile is a free data retrieval call binding the contract method 0xf4660940. +// +// Solidity: function getId2Profile(address id) view returns((address,address,address,address,address,bytes)) +func (_Profile *ProfileCaller) GetId2Profile(opts *bind.CallOpts, id common.Address) (IProfileCandidateProfile, error) { + var out []interface{} + err := _Profile.contract.Call(opts, &out, "getId2Profile", id) + + if err != nil { + return *new(IProfileCandidateProfile), err + } + + out0 := *abi.ConvertType(out[0], new(IProfileCandidateProfile)).(*IProfileCandidateProfile) + + return out0, err + +} + +// GetId2Profile is a free data retrieval call binding the contract method 0xf4660940. 
+// +// Solidity: function getId2Profile(address id) view returns((address,address,address,address,address,bytes)) +func (_Profile *ProfileSession) GetId2Profile(id common.Address) (IProfileCandidateProfile, error) { + return _Profile.Contract.GetId2Profile(&_Profile.CallOpts, id) +} + +// GetId2Profile is a free data retrieval call binding the contract method 0xf4660940. +// +// Solidity: function getId2Profile(address id) view returns((address,address,address,address,address,bytes)) +func (_Profile *ProfileCallerSession) GetId2Profile(id common.Address) (IProfileCandidateProfile, error) { + return _Profile.Contract.GetId2Profile(&_Profile.CallOpts, id) +} diff --git a/consensus/consortium/main.go b/consensus/consortium/main.go index 0d0cee3561..c8e608de3f 100644 --- a/consensus/consortium/main.go +++ b/consensus/consortium/main.go @@ -8,6 +8,7 @@ import ( consortiumCommon "github.com/ethereum/go-ethereum/consensus/consortium/common" v1 "github.com/ethereum/go-ethereum/consensus/consortium/v1" v2 "github.com/ethereum/go-ethereum/consensus/consortium/v2" + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -202,6 +203,62 @@ func (c *Consortium) GetBestParentBlock(chain *core.BlockChain) (*types.Block, b return c.v2.GetBestParentBlock(chain) } +func (c *Consortium) GetJustifiedBlock( + chain consensus.ChainHeaderReader, + blockNumber uint64, + blockHash common.Hash, +) (uint64, common.Hash) { + if c.chainConfig.IsShillin(new(big.Int).SetUint64(blockNumber)) { + return c.v2.GetJustifiedBlock(chain, blockNumber, blockHash) + } + return 0, common.Hash{} +} + +func (c *Consortium) GetFinalizedBlock( + chain consensus.ChainHeaderReader, + headNumber uint64, + headHash common.Hash, +) (uint64, common.Hash) { + if c.chainConfig.IsShillin(new(big.Int).SetUint64(headNumber)) { + return c.v2.GetFinalizedBlock(chain, headNumber, headHash) + } + return 0, common.Hash{} +} + +func (c *Consortium) SetVotePool(votePool consensus.VotePool) { + c.v2.SetVotePool(votePool) +} + +// IsActiveValidatorAt always returns false before Shillin +func (c *Consortium) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header) bool { + if c.chainConfig.IsShillin(header.Number) { + return c.v2.IsActiveValidatorAt(chain, header) + } + + return false +} + +// VerifyVote check if the finality voter is in the validator set, it assumes the signature is +// already verified +func (c *Consortium) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { + return c.v2.VerifyVote(chain, vote) +} + +// GetActiveValidatorAt always return false before Shillin +// See the comment for GetActiveValidatorAt in v2 package +// for more information +func (c *Consortium) GetActiveValidatorAt( + chain consensus.ChainHeaderReader, + blockNumber uint64, + blockHash common.Hash, +) []finality.ValidatorWithBlsPub { + if c.chainConfig.IsShillin(big.NewInt(int64(blockNumber))) { + return c.v2.GetActiveValidatorAt(chain, blockNumber, blockHash) + } + + return nil +} + // HandleSystemTransaction fixes up the statedb when system transaction // goes through ApplyMessage when tracing/debugging func HandleSystemTransaction(engine consensus.Engine, statedb *state.StateDB, msg core.Message, block *types.Block) bool { diff --git a/consensus/consortium/v2/api.go b/consensus/consortium/v2/api.go new file mode 100644 index 0000000000..a168eeb164 --- /dev/null +++ 
b/consensus/consortium/v2/api.go @@ -0,0 +1,80 @@ +package v2 + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + consortiumCommon "github.com/ethereum/go-ethereum/consensus/consortium/common" + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" +) + +type consortiumV2Api struct { + chain consensus.ChainHeaderReader + consortium *Consortium +} + +// GetValidatorAtHash returns the authorized validators that can seal block hash with +// their BLS public key if available +func (api *consortiumV2Api) GetValidatorAtHash(hash common.Hash) ([]finality.ValidatorWithBlsPub, error) { + header := api.chain.GetHeaderByHash(hash) + if header == nil { + return nil, consortiumCommon.ErrUnknownBlock + } + + snap, err := api.consortium.snapshot(api.chain, header.Number.Uint64()-1, header.ParentHash, nil) + if err != nil { + return nil, err + } + + if snap.ValidatorsWithBlsPub != nil { + return snap.ValidatorsWithBlsPub, nil + } + + var validators []finality.ValidatorWithBlsPub + for validator := range snap.Validators { + validators = append(validators, finality.ValidatorWithBlsPub{ + Address: validator, + }) + } + + return validators, nil +} + +type finalityVote struct { + Signature string `json:"signature"` + VoterPublicKey []string `json:"voterPublicKey"` + VoterAddress []string `json:"voterAddress"` +} + +// GetFinalityVoteAtHash returns the finality vote at block hash +func (api *consortiumV2Api) GetFinalityVoteAtHash(hash common.Hash) (*finalityVote, error) { + header := api.chain.GetHeaderByHash(hash) + if header == nil { + return nil, consortiumCommon.ErrUnknownBlock + } + + isShillin := api.consortium.chainConfig.IsShillin(header.Number) + extraData, err := finality.DecodeExtra(header.Extra, isShillin) + if err != nil { + return nil, err + } + + if extraData.HasFinalityVote == 0 { + return nil, nil + } + + var vote finalityVote + vote.Signature = common.Bytes2Hex(extraData.AggregatedFinalityVotes.Marshal()) + + snap, err := api.consortium.snapshot(api.chain, header.Number.Uint64()-1, header.ParentHash, nil) + if err != nil { + return nil, err + } + position := extraData.FinalityVotedValidators.Indices() + for _, pos := range position { + validator := snap.ValidatorsWithBlsPub[pos] + vote.VoterAddress = append(vote.VoterAddress, validator.Address.Hex()) + vote.VoterPublicKey = append(vote.VoterPublicKey, common.Bytes2Hex(validator.BlsPublicKey.Marshal())) + } + + return &vote, nil +} diff --git a/consensus/consortium/v2/consortium.go b/consensus/consortium/v2/consortium.go index 68522ceb06..5b0a8285a3 100644 --- a/consensus/consortium/v2/consortium.go +++ b/consensus/consortium/v2/consortium.go @@ -2,27 +2,32 @@ package v2 import ( "bytes" + "encoding/hex" "errors" "fmt" "io" + "math" "math/big" "math/rand" "sort" "sync" "time" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/core" "github.com/common-nighthawk/go-figure" - "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" consortiumCommon "github.com/ethereum/go-ethereum/consensus/consortium/common" + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" 
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" @@ -38,12 +43,11 @@ const ( inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory inmemorySignatures = 4096 // Number of recent block signatures to keep in memory - extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity - extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal + wiggleTime = 1000 * time.Millisecond // Random delay (per signer) to allow concurrent signers + unSealableValidator = -1 - validatorBytesLength = common.AddressLength - wiggleTime = 1000 * time.Millisecond // Random delay (per signer) to allow concurrent signers - unSealableValidator = -1 + finalityRatio float64 = 2.0 / 3 + assemblingFinalityVoteDuration = 1 * time.Second ) // Consortium delegated proof-of-stake protocol constants. @@ -92,18 +96,19 @@ type Consortium struct { recents *lru.ARCCache // Snapshots for recent block to speed up reorgs signatures *lru.ARCCache // Signatures of recent blocks to speed up mining - val common.Address // Ethereum address of the signing key - signer types.Signer + lock sync.RWMutex // Protects the below 4 fields + val common.Address // Ethereum address of the signing key signFn consortiumCommon.SignerFn // Signer function to authorize hashes with signTxFn consortiumCommon.SignerTxFn + contract consortiumCommon.ContractInteraction - lock sync.RWMutex // Protects the signer fields - - ethAPI *ethapi.PublicBlockChainAPI - contract *consortiumCommon.ContractIntegrator + signer types.Signer + ethAPI *ethapi.PublicBlockChainAPI fakeDiff bool v1 consortiumCommon.ConsortiumAdapter + + votePool consensus.VotePool } // New creates a Consortium delegated proof-of-stake consensus engine @@ -200,6 +205,79 @@ func (c *Consortium) GetRecents(chain consensus.ChainHeaderReader, number uint64 return nil } +// VerifyVote check if the finality voter is in the validator set, it assumes the signature is +// already verified +func (c *Consortium) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { + header := chain.GetHeaderByHash(vote.Data.TargetHash) + if header == nil { + return errors.New("header not found") + } + + if header.Number.Uint64() != vote.Data.TargetNumber { + return finality.ErrInvalidTargetNumber + } + + // Look at the comment assembleFinalityVote in function for the + // detailed explanation on the snapshot we need to get to verify the + // finality vote. + // Here we want to verify vote for TargetNumber, so we get snapshot + // at TargetNumber. 
+ snap, err := c.snapshot(chain, vote.Data.TargetNumber, vote.Data.TargetHash, nil) + if err != nil { + return err + } + + publicKey, err := blst.PublicKeyFromBytes(vote.PublicKey[:]) + if err != nil { + return err + } + if !snap.inBlsPublicKeySet(publicKey) { + return finality.ErrUnauthorizedFinalityVoter + } + + return nil +} + +// verifyFinalitySignatures verifies the finality signatures in the block header +func (c *Consortium) verifyFinalitySignatures( + chain consensus.ChainHeaderReader, + finalityVotedValidators finality.FinalityVoteBitSet, + finalitySignatures blsCommon.Signature, + parentNumber uint64, + parentHash common.Hash, + parents []*types.Header, +) error { + snap, err := c.snapshot(chain, parentNumber, parentHash, parents) + if err != nil { + return err + } + + votedValidatorPositions := finalityVotedValidators.Indices() + if len(votedValidatorPositions) < int(math.Floor(finalityRatio*float64(len(snap.ValidatorsWithBlsPub))))+1 { + return finality.ErrNotEnoughFinalityVote + } + + voteData := types.VoteData{ + TargetNumber: parentNumber, + TargetHash: parentHash, + } + digest := voteData.Hash() + + // verify aggregated signature + var publicKeys []blsCommon.PublicKey + for _, position := range votedValidatorPositions { + if position >= len(snap.ValidatorsWithBlsPub) { + return finality.ErrInvalidFinalityVotedBitSet + } + publicKeys = append(publicKeys, snap.ValidatorsWithBlsPub[position].BlsPublicKey) + } + if !finalitySignatures.FastAggregateVerify(publicKeys, digest) { + return finality.ErrFinalitySignatureVerificationFailed + } + + return nil +} + // VerifyHeaderAndParents checks whether a header conforms to the consensus rules.The // caller may optionally pass in a batch of parents (ascending order) to avoid // looking those up from the database. This is useful for concurrently verifying @@ -210,24 +288,30 @@ func (c *Consortium) VerifyHeaderAndParents(chain consensus.ChainHeaderReader, h } number := header.Number.Uint64() - // Check that the extra-data contains the vanity, validators and signature. 
- if len(header.Extra) < extraVanity { - return consortiumCommon.ErrMissingVanity - } - if len(header.Extra) < extraVanity+extraSeal { - return consortiumCommon.ErrMissingSignature + isShillin := c.chainConfig.IsShillin(header.Number) + extraData, err := finality.DecodeExtra(header.Extra, isShillin) + if err != nil { + return err } + // Check extra data isEpoch := number%c.config.EpochV2 == 0 || c.chainConfig.IsOnConsortiumV2(header.Number) - // Ensure that the extra-data contains a signer list on checkpoint, but none otherwise - signersBytes := len(header.Extra) - extraVanity - extraSeal - if !isEpoch && signersBytes != 0 { + if !isEpoch && len(extraData.CheckpointValidators) != 0 { return consortiumCommon.ErrExtraValidators } - if isEpoch && signersBytes%common.AddressLength != 0 { - return consortiumCommon.ErrInvalidSpanValidators + if isShillin && extraData.HasFinalityVote == 1 { + if err := c.verifyFinalitySignatures( + chain, + extraData.FinalityVotedValidators, + extraData.AggregatedFinalityVotes, + header.Number.Uint64()-1, + header.ParentHash, + parents, + ); err != nil { + return err + } } // Ensure that the mix digest is zero as we don't have fork protection currently @@ -333,12 +417,13 @@ func (c *Consortium) snapshot(chain consensus.ChainHeaderReader, number uint64, } // get validators set from number - validators, err = c.contract.GetValidators(big.NewInt(0).SetUint64(number)) + _, _, _, contract := c.readSignerAndContract() + validators, err = contract.GetValidators(big.NewInt(0).SetUint64(number)) if err != nil { log.Error("Load validators at the beginning failed", "err", err) return nil, err } - snap = newSnapshot(c.chainConfig, c.config, c.signatures, number, hash, validators, c.ethAPI) + snap = newSnapshot(c.chainConfig, c.config, c.signatures, number, hash, validators, nil, c.ethAPI) // load v1 recent list to prevent recent producing-block-validators produce block again snapV1 := c.v1.GetSnapshot(chain, number, parents) @@ -448,7 +533,7 @@ func (c *Consortium) verifySeal(chain consensus.ChainHeaderReader, header *types return errCoinBaseMisMatch } - if _, ok := snap.Validators[signer]; !ok { + if !snap.inInValidatorSet(signer) { return errUnauthorizedValidator } @@ -541,10 +626,57 @@ func (c *Consortium) verifyHeaderTime(header, parent *types.Header, snapshot *Sn return nil } +func (c *Consortium) getCheckpointValidatorsFromContract( + header *types.Header, +) ([]finality.ValidatorWithBlsPub, error) { + + parentBlockNumber := new(big.Int).Sub(header.Number, common.Big1) + _, _, _, contract := c.readSignerAndContract() + newValidators, err := contract.GetValidators(parentBlockNumber) + if err != nil { + return nil, err + } + + var ( + blsPublicKeys []blsCommon.PublicKey + checkpointValidator []finality.ValidatorWithBlsPub + filteredValidators []common.Address = newValidators + ) + + isShillin := c.chainConfig.IsShillin(header.Number) + if isShillin { + // The filteredValidators shares the same underlying array with newValidators + // See more: https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + filteredValidators = filteredValidators[:0] + for _, validator := range newValidators { + blsPublicKey, err := contract.GetBlsPublicKey(parentBlockNumber, validator) + if err == nil { + filteredValidators = append(filteredValidators, validator) + blsPublicKeys = append(blsPublicKeys, blsPublicKey) + } + } + } + + for i := range filteredValidators { + validatorWithBlsPub := finality.ValidatorWithBlsPub{ + Address: filteredValidators[i], + } + if isShillin { 
+ validatorWithBlsPub.BlsPublicKey = blsPublicKeys[i] + } + + checkpointValidator = append(checkpointValidator, validatorWithBlsPub) + } + + // sort validator by address + sort.Sort(finality.CheckpointValidatorAscending(checkpointValidator)) + return checkpointValidator, nil +} + // Prepare implements consensus.Engine, preparing all the consensus fields of the // header for running the transactions on top. func (c *Consortium) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { - coinbase, _, _ := c.readSigner() + coinbase, _, _, _ := c.readSignerAndContract() header.Coinbase = coinbase header.Nonce = types.BlockNonce{} @@ -557,28 +689,22 @@ func (c *Consortium) Prepare(chain consensus.ChainHeaderReader, header *types.He // Set the correct difficulty header.Difficulty = CalcDifficulty(snap, coinbase) - // Ensure the extra data has all it's components - if len(header.Extra) < extraVanity { - header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...) - } - header.Extra = header.Extra[:extraVanity] + isShillin := c.chainConfig.IsShillin(header.Number) + var extraData finality.HeaderExtraData if number%c.config.EpochV2 == 0 || c.chainConfig.IsOnConsortiumV2(big.NewInt(int64(number))) { - // This block is not inserted, the transactions in this block are not applied, so we need - // the call GetValidators at the context of previous block - newValidators, err := c.contract.GetValidators(new(big.Int).Sub(header.Number, common.Big1)) + checkpointValidator, err := c.getCheckpointValidatorsFromContract(header) if err != nil { return err } - // Sort validators by address - sort.Sort(validatorsAscending(newValidators)) - for _, validator := range newValidators { - header.Extra = append(header.Extra, validator.Bytes()...) - } + extraData.CheckpointValidators = checkpointValidator } - // Add extra seal space - header.Extra = append(header.Extra, make([]byte, extraSeal)...) + // After Shillin, extraData.hasFinalityVote = 0 here as we does + // not assemble finality vote yet. Let's wait some time for the + // finality votes to be broadcasted around the network. The + // finality votes are assembled later in Seal function. 
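For illustration only, not part of this patch: a rough sketch of the header extra-data size implied by the extraData.Encode call that follows and by the TestExtraDataEncode expectations later in this diff. The 32-byte vanity and 65-byte seal come from the constants this patch moves into consortiumCommon; the 96-byte aggregated signature and 48-byte public key are the usual BLS12-381 lengths and are assumptions here, and the bitset/signature fields are only present when the finality-vote flag is set.

package main

import "fmt"

const (
	extraVanity        = 32 // consortiumCommon.ExtraVanity
	hasFinalityVoteLen = 1  // one-byte flag, post-Shillin only
	finalityBitSetLen  = 8  // uint64 bitset of voted validator positions
	blsSignatureLength = 96 // aggregated finality signature (assumed BLS12-381 size)
	addressLength      = 20 // common.AddressLength
	blsPubkeyLength    = 48 // assumed BLS12-381 public key size
	extraSeal          = 65 // consortiumCommon.ExtraSeal
)

func main() {
	numCheckpointValidators := 2 // non-zero only on epoch/checkpoint blocks
	total := extraVanity + hasFinalityVoteLen + finalityBitSetLen + blsSignatureLength +
		numCheckpointValidators*(addressLength+blsPubkeyLength) + extraSeal
	fmt.Println("expected post-Shillin header.Extra length:", total) // 338 for 2 validators
}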
+ header.Extra = extraData.Encode(isShillin) // Mix digest is reserved for now, set to empty header.MixDigest = common.Hash{} @@ -593,23 +719,44 @@ func (c *Consortium) Prepare(chain consensus.ChainHeaderReader, header *types.He return nil } -func (c *Consortium) submitBlockReward(transactOpts *consortiumCommon.ApplyTransactOpts) error { - if err := c.contract.SubmitBlockReward(transactOpts); err != nil { - log.Error("Failed to submit block reward", "err", err) +func (c *Consortium) processSystemTransactions(chain consensus.ChainHeaderReader, header *types.Header, + transactOpts *consortiumCommon.ApplyTransactOpts, isFinalizeAndAssemble bool) error { + + snap, err := c.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil) + if err != nil { return err } - return nil -} -func (c *Consortium) processSystemTransactions(chain consensus.ChainHeaderReader, header *types.Header, - transactOpts *consortiumCommon.ApplyTransactOpts, isFinalizeAndAssemble bool) error { + _, _, _, contract := c.readSignerAndContract() - if header.Difficulty.Cmp(diffInTurn) != 0 { - number := header.Number.Uint64() - snap, err := c.snapshot(chain, number-1, header.ParentHash, nil) + // If the parent's block includes the finality votes, distribute reward for the voters + if c.chainConfig.IsShillin(new(big.Int).Sub(header.Number, common.Big1)) { + parentHeader := chain.GetHeaderByHash(header.ParentHash) + extraData, err := finality.DecodeExtra(parentHeader.Extra, true) if err != nil { return err } + if extraData.HasFinalityVote == 1 { + parentSnap, err := c.snapshot(chain, parentHeader.Number.Uint64()-1, parentHeader.ParentHash, nil) + if err != nil { + return err + } + + votedValidatorPositions := extraData.FinalityVotedValidators.Indices() + var votedValidators []common.Address + for _, position := range votedValidatorPositions { + // The header has been verified so there must be no out of bound here + votedValidators = append(votedValidators, parentSnap.ValidatorsWithBlsPub[position].Address) + } + + if err := contract.FinalityReward(transactOpts, votedValidators); err != nil { + log.Error("Failed to finality reward validator", "err", err) + return err + } + } + } + + if header.Difficulty.Cmp(diffInTurn) != 0 { spoiledVal := snap.supposeValidator() signedRecently := false if c.chainConfig.IsOlek(header.Number) { @@ -626,7 +773,7 @@ func (c *Consortium) processSystemTransactions(chain consensus.ChainHeaderReader if !isFinalizeAndAssemble { log.Info("Slash validator", "number", header.Number, "spoiled", spoiledVal) } - if err := c.contract.Slash(transactOpts, spoiledVal); err != nil { + if err := contract.Slash(transactOpts, spoiledVal); err != nil { // it is possible that slash validator failed because of the slash channel is disabled. log.Error("Failed to slash validator", "block hash", header.Hash(), "address", spoiledVal) return err @@ -637,20 +784,24 @@ func (c *Consortium) processSystemTransactions(chain consensus.ChainHeaderReader // Previously, we call WrapUpEpoch before SubmitBlockReward which is the wrong order. // We create a hardfork here to fix the contract call order. 
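For illustration only, not part of this patch: a minimal sketch of how the FinalityVotedValidators bitset in the parent header maps back to voter addresses before FinalityReward is called, as processSystemTransactions does above. Indices() and the validator list here are stand-ins for the real bitset and parent snapshot.

package main

import "fmt"

func main() {
	// Stand-in for parentSnap.ValidatorsWithBlsPub, ordered as in the snapshot.
	validators := []string{"0xaaaa", "0xbbbb", "0xcccc", "0xdddd"}

	// Stand-in for extraData.FinalityVotedValidators.Indices():
	// the bit positions that were set by the block producer.
	votedPositions := []int{0, 2, 3}

	var votedValidators []string
	for _, pos := range votedPositions {
		// The header has already been verified, so positions stay in bounds.
		votedValidators = append(votedValidators, validators[pos])
	}
	fmt.Println(votedValidators) // these addresses receive the finality reward
}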
if c.chainConfig.IsPuffy(header.Number) { - if err := c.submitBlockReward(transactOpts); err != nil { + if err := contract.SubmitBlockReward(transactOpts); err != nil { + log.Error("Failed to submit block reward", "err", err) return err } } if header.Number.Uint64()%c.config.EpochV2 == c.config.EpochV2-1 { - if err := c.contract.WrapUpEpoch(transactOpts); err != nil { + if err := contract.WrapUpEpoch(transactOpts); err != nil { log.Error("Failed to wrap up epoch", "err", err) return err } } if !c.chainConfig.IsPuffy(header.Number) { - return c.submitBlockReward(transactOpts) + if err := contract.SubmitBlockReward(transactOpts); err != nil { + log.Error("Failed to submit block reward", "err", err) + return err + } } return nil @@ -662,7 +813,7 @@ func (c *Consortium) processSystemTransactions(chain consensus.ChainHeaderReader // - SubmitBlockRewards of the current block func (c *Consortium) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs *[]*types.Transaction, uncles []*types.Header, receipts *[]*types.Receipt, systemTxs *[]*types.Transaction, internalTxs *[]*types.InternalTransaction, usedGas *uint64) error { - _, _, signTxFn := c.readSigner() + _, _, signTxFn, _ := c.readSignerAndContract() evmContext := core.NewEVMBlockContext(header, consortiumCommon.ChainContext{Chain: chain, Consortium: c}, &header.Coinbase, chain.OpEvents()...) transactOpts := &consortiumCommon.ApplyTransactOpts{ ApplyMessageOpts: &consortiumCommon.ApplyMessageOpts{ @@ -683,26 +834,35 @@ func (c *Consortium) Finalize(chain consensus.ChainHeaderReader, header *types.H EthAPI: c.ethAPI, } + isShillin := c.chainConfig.IsShillin(header.Number) + // If the block is an epoch end block, verify the validator list // The verification can only be done when the state is ready, it can't be done in VerifyHeader. 
if header.Number.Uint64()%c.config.EpochV2 == 0 { - // The GetValidators in Prepare is called on the context of previous block so here it must - // be called on context of previous block too - newValidators, err := c.contract.GetValidators(new(big.Int).Sub(header.Number, common.Big1)) + checkpointValidator, err := c.getCheckpointValidatorsFromContract(header) if err != nil { return err } - // sort validator by address - sort.Sort(validatorsAscending(newValidators)) - validatorsBytes := make([]byte, len(newValidators)*validatorBytesLength) - for i, validator := range newValidators { - copy(validatorsBytes[i*validatorBytesLength:], validator.Bytes()) + extraData, err := finality.DecodeExtra(header.Extra, isShillin) + if err != nil { + return err } - extraSuffix := len(header.Extra) - extraSeal - if !bytes.Equal(header.Extra[extraVanity:extraSuffix], validatorsBytes) { + if len(checkpointValidator) != len(extraData.CheckpointValidators) { return errMismatchingEpochValidators } + + for i := range checkpointValidator { + if checkpointValidator[i].Address != extraData.CheckpointValidators[i].Address { + return errMismatchingEpochValidators + } + + if isShillin { + if !checkpointValidator[i].BlsPublicKey.Equals(extraData.CheckpointValidators[i].BlsPublicKey) { + return errMismatchingEpochValidators + } + } + } } if err := c.processSystemTransactions(chain, header, transactOpts, false); err != nil { @@ -730,7 +890,7 @@ func (c *Consortium) FinalizeAndAssemble(chain consensus.ChainHeaderReader, head if receipts == nil { receipts = make([]*types.Receipt, 0) } - _, _, signTxFn := c.readSigner() + _, _, signTxFn, _ := c.readSignerAndContract() evmContext := core.NewEVMBlockContext(header, consortiumCommon.ChainContext{Chain: chain, Consortium: c}, &header.Coinbase, chain.OpEvents()...) 
transactOpts := &consortiumCommon.ApplyTransactOpts{ ApplyMessageOpts: &consortiumCommon.ApplyMessageOpts{ @@ -780,10 +940,11 @@ func (c *Consortium) FinalizeAndAssemble(chain consensus.ChainHeaderReader, head // Authorize injects a private key into the consensus engine to mint new blocks with func (c *Consortium) Authorize(signer common.Address, signFn consortiumCommon.SignerFn, signTxFn consortiumCommon.SignerTxFn) { c.lock.Lock() + defer c.lock.Unlock() + c.val = signer c.signFn = signFn c.signTxFn = signTxFn - c.lock.Unlock() err := c.initContract(signer, signTxFn) if err != nil { @@ -807,7 +968,7 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, return nil } // Don't hold the val fields for the entire sealing procedure - val, signFn, _ := c.readSigner() + val, signFn, _, _ := c.readSignerAndContract() snap, err := c.snapshot(chain, number-1, header.ParentHash, nil) if err != nil { @@ -815,7 +976,7 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, } // Bail out if we're unauthorized to sign a block - if _, authorized := snap.Validators[val]; !authorized { + if !snap.inInValidatorSet(val) { return errUnauthorizedValidator } @@ -830,7 +991,7 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, if !c.chainConfig.IsBuba(block.Number()) { if header.Difficulty.Cmp(diffInTurn) != 0 { // It's not our turn explicitly to sign, delay it a bit - wiggle := time.Duration(len(snap.Validators)/2+1) * wiggleTime + wiggle := time.Duration(len(snap.validators())/2+1) * wiggleTime delay += time.Duration(rand.Int63n(int64(wiggle))) + wiggleTime // delay for 0.5s more log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle)) @@ -838,16 +999,25 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, } log.Info("Sealing block with", "number", number, "delay", delay, "headerDifficulty", header.Difficulty, "val", val.Hex(), "txs", len(block.Transactions())) - // Sign all the things! - sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeConsortium, consortiumRLP(header, c.chainConfig.ChainID)) - if err != nil { - return err - } - copy(header.Extra[len(header.Extra)-extraSeal:], sig) - // Wait until sealing is terminated or delay timeout. log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay)) go func() { + select { + case <-stop: + return + case <-time.After(delay - assemblingFinalityVoteDuration): + c.assembleFinalityVote(header, snap) + + // Sign all the things! + sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeConsortium, consortiumRLP(header, c.chainConfig.ChainID)) + if err != nil { + log.Error("Failed to seal block", "err", err) + return + } + copy(header.Extra[len(header.Extra)-consortiumCommon.ExtraSeal:], sig) + } + + delay = time.Until(time.Unix(int64(header.Time), 0)) select { case <-stop: return @@ -857,7 +1027,7 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, select { case results <- block.WithSeal(header): default: - log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header, c.chainConfig.ChainID)) + log.Warn("Sealing result is not read by miner", "sealhash", calculateSealHash(header, c.chainConfig.ChainID)) } }() @@ -866,7 +1036,24 @@ func (c *Consortium) Seal(chain consensus.ChainHeaderReader, block *types.Block, // SealHash returns the hash of a block prior to it being sealed. 
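For illustration only, not part of this patch: a simplified sketch of the two-phase wait that the reworked Seal above performs. It wakes up assemblingFinalityVoteDuration before the slot time to gather finality votes and sign the header, then waits out the remainder of the slot before publishing; the stop channel and durations are omitted or illustrative.

package main

import (
	"fmt"
	"time"
)

func main() {
	const assemblingFinalityVoteDuration = 1 * time.Second

	// Stand-in for time.Unix(int64(header.Time), 0), i.e. the block's slot time.
	slotTime := time.Now().Add(3 * time.Second)
	delay := time.Until(slotTime)

	// Phase 1: wait until shortly before the slot, then assemble votes and sign.
	time.Sleep(delay - assemblingFinalityVoteDuration)
	fmt.Println("assemble finality votes into header.Extra and sign the header")

	// Phase 2: wait out the rest of the slot, then hand the sealed block to the miner.
	time.Sleep(time.Until(slotTime))
	fmt.Println("publish the sealed block")
}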
func (c *Consortium) SealHash(header *types.Header) common.Hash { - return SealHash(header, c.chainConfig.ChainID) + isShillin := c.chainConfig.IsShillin(header.Number) + if isShillin { + // After Shillin, this consensus.SealHash function does not + // return the real hash used for sealing because the real + // hash changes after the FinalizeAndAssemble call. As this + // function is used by worker only to store and look up the + // sealing tasks, we just return the hash of header without + // the finality vote, so this hash remains unchanged after + // FinalizeAndAssemble call. + copyHeader := types.CopyHeader(header) + + extraData, _ := finality.DecodeExtra(copyHeader.Extra, true) + extraData.HasFinalityVote = 0 + copyHeader.Extra = extraData.Encode(true) + return calculateSealHash(copyHeader, c.chainConfig.ChainID) + } else { + return calculateSealHash(header, c.chainConfig.ChainID) + } } // Close implements consensus.Engine. It's a noop for Consortium as there are no background threads. @@ -876,7 +1063,14 @@ func (c *Consortium) Close() error { // APIs are backward compatible with the v1, so we do not to implement it again func (c *Consortium) APIs(chain consensus.ChainHeaderReader) []rpc.API { - return []rpc.API{} + return []rpc.API{ + { + Namespace: "consortiumv2", + Version: "1.0", + Service: &consortiumV2Api{chain: chain, consortium: c}, + Public: false, + }, + } } // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty @@ -888,7 +1082,7 @@ func (c *Consortium) CalcDifficulty(chain consensus.ChainHeaderReader, time uint if err != nil { return nil } - coinbase, _, _ := c.readSigner() + coinbase, _, _, _ := c.readSignerAndContract() return CalcDifficulty(snap, coinbase) } @@ -913,11 +1107,16 @@ func (c *Consortium) initContract(coinbase common.Address, signTxFn consortiumCo return nil } -func (c *Consortium) readSigner() (common.Address, consortiumCommon.SignerFn, consortiumCommon.SignerTxFn) { +func (c *Consortium) readSignerAndContract() ( + common.Address, + consortiumCommon.SignerFn, + consortiumCommon.SignerTxFn, + consortiumCommon.ContractInteraction, +) { c.lock.RLock() defer c.lock.RUnlock() - return c.val, c.signFn, c.signTxFn + return c.val, c.signFn, c.signTxFn, c.contract } // GetBestParentBlock goes backward in the canonical chain to find if the miner can @@ -925,7 +1124,7 @@ func (c *Consortium) readSigner() (common.Address, consortiumCommon.SignerFn, co // cannot create a better chain, this function returns the head block of current // canonical chain. 
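For illustration only, not part of this patch: a hedged sketch of querying the new consortiumv2 namespace that APIs() above now registers (GetValidatorAtHash and GetFinalityVoteAtHash from api.go). It assumes the node operator has exposed the consortiumv2 module over HTTP; method names follow geth's usual namespace_methodName convention, and the endpoint and block hash are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	// Mirrors the finalityVote JSON shape: signature, voterPublicKey, voterAddress.
	// The result is null when the block carries no finality vote.
	var vote map[string]interface{}
	if err := client.Call(&vote, "consortiumv2_getFinalityVoteAtHash", blockHash); err != nil {
		log.Fatal(err)
	}
	fmt.Println(vote)
}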
func (c *Consortium) GetBestParentBlock(chain *core.BlockChain) (*types.Block, bool) { - signer, _, _ := c.readSigner() + signer, _, _, _ := c.readSignerAndContract() currentBlock := chain.CurrentBlock() block := currentBlock @@ -952,6 +1151,181 @@ func (c *Consortium) GetBestParentBlock(chain *core.BlockChain) (*types.Block, b return currentBlock, false } +// GetJustifiedBlock gets the fast finality justified block +func (c *Consortium) GetJustifiedBlock(chain consensus.ChainHeaderReader, blockNumber uint64, blockHash common.Hash) (uint64, common.Hash) { + snap, err := c.snapshot(chain, blockNumber, blockHash, nil) + if err != nil { + log.Error("Failed to get snapshot", "err", err) + return 0, common.Hash{} + } + + return snap.JustifiedBlockNumber, snap.JustifiedBlockHash +} + +// assembleFinalityVote collects finality votes from vote pool and assembles +// them into block header +// +// block (N) <- block (N + 1) +// Block (N) is justified means there are enough finality votes for block (N) in +// block (N + 1) +// The finality vote in block (N + 1) is verified by validator set that are able +// to produce block (N + 1) (ignoring the recently signed rule) which is in +// snapshot (N) +// So here when including the vote for header.Number - 1 into header.Number, the +// snapshot provided must be at header.Number - 1 +func (c *Consortium) assembleFinalityVote(header *types.Header, snap *Snapshot) { + if c.chainConfig.IsShillin(header.Number) { + var ( + signatures []blsCommon.Signature + finalityVotedValidators finality.FinalityVoteBitSet + finalityThreshold int = int(math.Floor(finalityRatio*float64(len(snap.ValidatorsWithBlsPub)))) + 1 + ) + + // We assume the signature has been verified in vote pool + // so we do not verify signature here + if c.votePool != nil { + votes := c.votePool.FetchVoteByBlockHash(header.ParentHash) + if len(votes) >= finalityThreshold { + for _, vote := range votes { + publicKey, err := blst.PublicKeyFromBytes(vote.PublicKey[:]) + if err != nil { + log.Warn("Malformed public key from vote pool", "err", err) + continue + } + authorized := false + for valPosition, validator := range snap.ValidatorsWithBlsPub { + if publicKey.Equals(validator.BlsPublicKey) { + signature, err := blst.SignatureFromBytes(vote.Signature[:]) + if err != nil { + log.Warn("Malformed signature from vote pool", "err", err) + break + } + signatures = append(signatures, signature) + finalityVotedValidators.SetBit(valPosition) + authorized = true + break + } + } + if !authorized { + log.Warn("Unauthorized voter's signature from vote pool", "publicKey", hex.EncodeToString(publicKey.Marshal())) + } + } + + bitSetCount := len(finalityVotedValidators.Indices()) + if bitSetCount >= finalityThreshold { + extraData, err := finality.DecodeExtra(header.Extra, true) + if err != nil { + // This should not happen + log.Error("Failed to decode header extra data", "err", err) + return + } + extraData.HasFinalityVote = 1 + extraData.FinalityVotedValidators = finalityVotedValidators + extraData.AggregatedFinalityVotes = blst.AggregateSignatures(signatures) + header.Extra = extraData.Encode(true) + } + } + } + } + +} + +// GetFinalizedBlock gets the fast finality finalized block +func (c *Consortium) GetFinalizedBlock( + chain consensus.ChainHeaderReader, + headNumber uint64, + headHash common.Hash, +) (uint64, common.Hash) { + var ( + justifiedNumber, descendantJustifiedNumber uint64 + justifiedHash, descendantJustifiedHash common.Hash + ) + + justifiedNumber = headNumber + justifiedHash = headHash + + for { 
+ // When getting the snapshot at block N, the maximum justified number is N - 1. + // Here, we want to check if the block at justifiedNumber - 1 is justified too. + // So, the snapshot we need to look up is at justifiedNumber. + justifiedNumber, justifiedHash = c.GetJustifiedBlock(chain, justifiedNumber, justifiedHash) + if justifiedNumber == 0 { + return 0, common.Hash{} + } + + // Check if the block is justified and its direct descendant is also justified + if descendantJustifiedNumber != 0 && descendantJustifiedNumber-1 == justifiedNumber { + // Check if the justified block and its justified direct descendant are voted by the + // same set of validators. + // The validator set verifies finality vote for block (N) is in the snapshot (N) + descendantSnap, err := c.snapshot(chain, descendantJustifiedNumber, descendantJustifiedHash, nil) + if err != nil { + return 0, common.Hash{} + } + + snap, err := c.snapshot(chain, justifiedNumber, justifiedHash, nil) + if err != nil { + return 0, common.Hash{} + } + + descendantValidator := descendantSnap.validators() + snapValidator := snap.validators() + + if len(descendantValidator) == len(snapValidator) { + var i int + for i = 0; i < len(descendantValidator); i++ { + if descendantValidator[i] != snapValidator[i] { + break + } + } + + if i == len(descendantValidator) { + return justifiedNumber, justifiedHash + } + } + } + + descendantJustifiedNumber = justifiedNumber + descendantJustifiedHash = justifiedHash + } +} + +// SetVotePool sets the finality vote pool to be used by consensus +// engine +func (c *Consortium) SetVotePool(votePool consensus.VotePool) { + c.votePool = votePool +} + +// IsActiveValidatorAt is used to check if we can vote for header.Number (the vote +// is included at header.Number + 1). As explained in assembleFinalityVote, the vote +// for header.Number is verified by the validator set at snapshot at block.Number. +// So here we get the snapshot at block.Number not at block.Number - 1 +func (c *Consortium) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header) bool { + snap, err := c.snapshot(chain, header.Number.Uint64(), header.Hash(), nil) + if err != nil { + return false + } + + nodeValidator, _, _, _ := c.readSignerAndContract() + return snap.inInValidatorSet(nodeValidator) +} + +// GetActiveValidatorAt gets the validator that can vote for block number +// (the vote is included in block number + 1), so get the snapshot at +// block number +func (c *Consortium) GetActiveValidatorAt( + chain consensus.ChainHeaderReader, + blockNumber uint64, + blockHash common.Hash, +) []finality.ValidatorWithBlsPub { + snap, err := c.snapshot(chain, blockNumber, blockHash, nil) + if err != nil { + return nil + } + + return snap.ValidatorsWithBlsPub +} + // ecrecover extracts the Ronin account address from a signed header. 
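For illustration only, not part of this patch: a toy restatement of the finalization rule that GetFinalizedBlock above walks backwards through the justified chain to apply. A justified block becomes finalized when its direct descendant is also justified and both were justified by the same validator set; the types below are stand-ins for the real snapshots.

package main

import "fmt"

type justified struct {
	number     uint64
	validators string // stand-in for the snapshot's ordered validator list
}

// isFinalized mirrors the two checks in GetFinalizedBlock: direct descendant
// (descendantJustifiedNumber-1 == justifiedNumber) and identical validator sets.
func isFinalized(block, descendant justified) bool {
	return descendant.number == block.number+1 && descendant.validators == block.validators
}

func main() {
	fmt.Println(isFinalized(justified{100, "setA"}, justified{101, "setA"})) // true: finalized
	fmt.Println(isFinalized(justified{100, "setA"}, justified{105, "setA"})) // false: not a direct descendant
	fmt.Println(isFinalized(justified{100, "setA"}, justified{101, "setB"})) // false: validator set changed
}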
func ecrecover(header *types.Header, sigcache *lru.ARCCache, chainId *big.Int) (common.Address, error) { // If the signature's already cached, return that @@ -966,7 +1340,7 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache, chainId *big.Int) ( signature := header.Extra[len(header.Extra)-consortiumCommon.ExtraSeal:] // Recover the public key and the Ethereum address - pubkey, err := crypto.Ecrecover(SealHash(header, chainId).Bytes(), signature) + pubkey, err := crypto.Ecrecover(calculateSealHash(header, chainId).Bytes(), signature) if err != nil { return common.Address{}, err } @@ -977,8 +1351,8 @@ func ecrecover(header *types.Header, sigcache *lru.ARCCache, chainId *big.Int) ( return signer, nil } -// SealHash returns the hash of a block prior to it being sealed. -func SealHash(header *types.Header, chainId *big.Int) (hash common.Hash) { +// calculateSealHash returns the hash of a block prior to it being sealed. +func calculateSealHash(header *types.Header, chainId *big.Int) (hash common.Hash) { hasher := sha3.NewLegacyKeccak256() encodeSigHeader(hasher, header, chainId) hasher.Sum(hash[:0]) diff --git a/consensus/consortium/v2/consortium_test.go b/consensus/consortium/v2/consortium_test.go index 2cee1916f4..653eca4854 100644 --- a/consensus/consortium/v2/consortium_test.go +++ b/consensus/consortium/v2/consortium_test.go @@ -1,6 +1,8 @@ package v2 import ( + "bytes" + "encoding/binary" "errors" "math/big" "testing" @@ -8,8 +10,17 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + consortiumCommon "github.com/ethereum/go-ethereum/consensus/consortium/common" + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/params" + lru "github.com/hashicorp/golang-lru" ) func TestSealableValidators(t *testing.T) { @@ -20,7 +31,7 @@ func TestSealableValidators(t *testing.T) { validators = append(validators, common.BigToAddress(big.NewInt(int64(i)))) } - snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil) + snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil, nil) for i := 0; i <= 10; i++ { snap.Recents[uint64(i)] = common.BigToAddress(big.NewInt(int64(i))) } @@ -67,7 +78,7 @@ func TestBackoffTime(t *testing.T) { validators = append(validators, common.BigToAddress(big.NewInt(int64(i)))) } - snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil) + snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil, nil) for i := 0; i <= 10; i++ { snap.Recents[uint64(i)] = common.BigToAddress(big.NewInt(int64(i))) } @@ -121,7 +132,7 @@ func TestBackoffTimeOlek(t *testing.T) { validators = append(validators, common.BigToAddress(big.NewInt(int64(i)))) } - snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil) + snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil, nil) for i := 0; i <= 10; i++ { snap.Recents[uint64(i)] = common.BigToAddress(big.NewInt(int64(i))) } @@ -173,7 +184,7 @@ func TestBackoffTimeInturnValidatorInRecentList(t *testing.T) { validators = append(validators, common.BigToAddress(big.NewInt(int64(i)))) } - snap := newSnapshot(nil, nil, nil, 10, 
common.Hash{}, validators, nil) + snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil, nil) for i := 0; i <= 9; i++ { snap.Recents[uint64(i)] = common.BigToAddress(big.NewInt(int64(i))) } @@ -230,7 +241,7 @@ func TestVerifyBlockHeaderTime(t *testing.T) { validators = append(validators, common.BigToAddress(big.NewInt(int64(i)))) } - snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil) + snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, validators, nil, nil) for i := 0; i <= 10; i++ { snap.Recents[uint64(i)] = common.BigToAddress(big.NewInt(int64(i))) } @@ -274,3 +285,690 @@ func TestVerifyBlockHeaderTime(t *testing.T) { t.Errorf("Expect successful verification, got %s", err) } } + +func TestExtraDataEncode(t *testing.T) { + extraData := finality.HeaderExtraData{} + data := extraData.Encode(false) + expectedLen := consortiumCommon.ExtraSeal + consortiumCommon.ExtraVanity + if len(data) != expectedLen { + t.Errorf( + "Mismatch header extra data length before hardfork, have %v expect %v", + len(data), expectedLen, + ) + } + + extraData = finality.HeaderExtraData{ + CheckpointValidators: []finality.ValidatorWithBlsPub{ + { + Address: common.Address{0x1}, + }, + { + Address: common.Address{0x2}, + }, + }, + } + expectedLen = consortiumCommon.ExtraSeal + consortiumCommon.ExtraVanity + common.AddressLength*2 + data = extraData.Encode(false) + if len(data) != expectedLen { + t.Errorf( + "Mismatch header extra data length before hardfork, have %v expect %v", + len(data), expectedLen, + ) + } + + expectedLen = consortiumCommon.ExtraSeal + consortiumCommon.ExtraVanity + 1 + extraData = finality.HeaderExtraData{} + data = extraData.Encode(true) + if len(data) != expectedLen { + t.Errorf( + "Mismatch header extra data length before hardfork, have %v expect %v", + len(data), expectedLen, + ) + } + + secretKey, err := blst.RandKey() + if err != nil { + t.Fatalf("Failed to generate secret key, err %s", err) + } + dummyDigest := [32]byte{} + signature := secretKey.Sign(dummyDigest[:]) + + extraData = finality.HeaderExtraData{ + HasFinalityVote: 1, + AggregatedFinalityVotes: signature, + } + expectedLen = consortiumCommon.ExtraSeal + consortiumCommon.ExtraVanity + 1 + 8 + params.BLSSignatureLength + data = extraData.Encode(true) + if len(data) != expectedLen { + t.Errorf( + "Mismatch header extra data length after hardfork, have %v expect %v", + len(data), expectedLen, + ) + } + + extraData = finality.HeaderExtraData{ + HasFinalityVote: 1, + AggregatedFinalityVotes: signature, + CheckpointValidators: []finality.ValidatorWithBlsPub{ + { + Address: common.Address{0x1}, + BlsPublicKey: secretKey.PublicKey(), + }, + { + Address: common.Address{0x2}, + BlsPublicKey: secretKey.PublicKey(), + }, + }, + } + expectedLen = consortiumCommon.ExtraSeal + consortiumCommon.ExtraVanity + 1 + 8 + params.BLSSignatureLength + 2*(common.AddressLength+params.BLSPubkeyLength) + data = extraData.Encode(true) + if len(data) != expectedLen { + t.Errorf( + "Mismatch header extra data length after hardfork, have %v expect %v", + len(data), expectedLen, + ) + } +} + +func TestExtraDataDecode(t *testing.T) { + secretKey, err := blst.RandKey() + if err != nil { + t.Fatalf("Failed to generate secret key, err %s", err) + } + dummyDigest := [32]byte{} + signature := secretKey.Sign(dummyDigest[:]) + + rawBytes := []byte{'t', 'e', 's', 't'} + _, err = finality.DecodeExtra(rawBytes, false) + if !errors.Is(err, finality.ErrMissingVanity) { + t.Errorf("Expect error %v have %v", finality.ErrMissingVanity, 
err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + _, err = finality.DecodeExtra(rawBytes, false) + if !errors.Is(err, finality.ErrMissingSignature) { + t.Errorf("Expect error %v have %v", finality.ErrMissingSignature, err) + } + + rawBytes = append(rawBytes, byte(12)) + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...) + _, err = finality.DecodeExtra(rawBytes, false) + if !errors.Is(err, finality.ErrInvalidSpanValidators) { + t.Errorf("Expect error %v have %v", finality.ErrInvalidSpanValidators, err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + _, err = finality.DecodeExtra(rawBytes, true) + if !errors.Is(err, finality.ErrMissingHasFinalityVote) { + t.Errorf("Expect error %v have %v", finality.ErrMissingHasFinalityVote, err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x00)) + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...) + _, err = finality.DecodeExtra(rawBytes, true) + if err != nil { + t.Errorf("Expect successful decode have %v", err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x01)) + _, err = finality.DecodeExtra(rawBytes, true) + if !errors.Is(err, finality.ErrMissingFinalityVoteBitSet) { + t.Errorf("Expect error %v have %v", finality.ErrMissingFinalityVoteBitSet, err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x01)) + rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0) + _, err = finality.DecodeExtra(rawBytes, true) + if !errors.Is(err, finality.ErrMissingFinalitySignature) { + t.Errorf("Expect error %v have %v", finality.ErrMissingFinalitySignature, err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x01)) + rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0) + rawBytes = append(rawBytes, signature.Marshal()...) + _, err = finality.DecodeExtra(rawBytes, true) + if !errors.Is(err, finality.ErrMissingSignature) { + t.Errorf("Expect error %v have %v", finality.ErrMissingSignature, err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x01)) + rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0) + rawBytes = append(rawBytes, signature.Marshal()...) + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...) + _, err = finality.DecodeExtra(rawBytes, true) + if err != nil { + t.Errorf("Expect successful decode have %v", err) + } + + rawBytes = []byte{} + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...) + rawBytes = append(rawBytes, byte(0x01)) + rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0) + rawBytes = append(rawBytes, signature.Marshal()...) + rawBytes = append(rawBytes, common.Address{0x1}.Bytes()...) + rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...) 
+	_, err = finality.DecodeExtra(rawBytes, true)
+	if !errors.Is(err, finality.ErrInvalidSpanValidators) {
+		t.Errorf("Expect error %v have %v", finality.ErrInvalidSpanValidators, err)
+	}
+
+	rawBytes = []byte{}
+	rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...)
+	rawBytes = append(rawBytes, byte(0x02))
+	rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0)
+	rawBytes = append(rawBytes, signature.Marshal()...)
+	rawBytes = append(rawBytes, common.Address{0x1}.Bytes()...)
+	rawBytes = append(rawBytes, secretKey.PublicKey().Marshal()...)
+	rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...)
+	_, err = finality.DecodeExtra(rawBytes, true)
+	if !errors.Is(err, finality.ErrInvalidHasFinalityVote) {
+		t.Errorf("Expect error %v have %v", finality.ErrInvalidHasFinalityVote, err)
+	}
+
+	rawBytes = []byte{}
+	rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraVanity)...)
+	rawBytes = append(rawBytes, byte(0x01))
+	rawBytes = binary.LittleEndian.AppendUint64(rawBytes, 0)
+	rawBytes = append(rawBytes, signature.Marshal()...)
+	rawBytes = append(rawBytes, common.Address{0x1}.Bytes()...)
+	rawBytes = append(rawBytes, secretKey.PublicKey().Marshal()...)
+	rawBytes = append(rawBytes, bytes.Repeat([]byte{0x00}, consortiumCommon.ExtraSeal)...)
+	_, err = finality.DecodeExtra(rawBytes, true)
+	if err != nil {
+		t.Errorf("Expect successful decode have %v", err)
+	}
+
+	extraData := finality.HeaderExtraData{
+		HasFinalityVote:         1,
+		AggregatedFinalityVotes: signature,
+		CheckpointValidators: []finality.ValidatorWithBlsPub{
+			{
+				Address:      common.Address{0x1},
+				BlsPublicKey: secretKey.PublicKey(),
+			},
+			{
+				Address:      common.Address{0x2},
+				BlsPublicKey: secretKey.PublicKey(),
+			},
+		},
+	}
+	data := extraData.Encode(true)
+	decodedData, err := finality.DecodeExtra(data, true)
+	if err != nil {
+		t.Errorf("Expect successful decode have %v", err)
+	}
+
+	// Do some sanity checks
+	if !bytes.Equal(
+		decodedData.AggregatedFinalityVotes.Marshal(),
+		extraData.AggregatedFinalityVotes.Marshal(),
+	) {
+		t.Errorf("Mismatch decoded data")
+	}
+
+	if decodedData.CheckpointValidators[0].Address != extraData.CheckpointValidators[0].Address {
+		t.Errorf("Mismatch decoded data")
+	}
+
+	if !decodedData.CheckpointValidators[0].BlsPublicKey.Equals(extraData.CheckpointValidators[0].BlsPublicKey) {
+		t.Errorf("Mismatch decoded data")
+	}
+}
+
+func TestVerifyFinalitySignature(t *testing.T) {
+	const numValidator = 3
+	var err error
+
+	secretKey := make([]blsCommon.SecretKey, numValidator+1)
+	for i := 0; i < len(secretKey); i++ {
+		secretKey[i], err = blst.RandKey()
+		if err != nil {
+			t.Fatalf("Failed to generate secret key, err %s", err)
+		}
+	}
+
+	valWithBlsPub := make([]finality.ValidatorWithBlsPub, numValidator)
+	for i := 0; i < len(valWithBlsPub); i++ {
+		valWithBlsPub[i] = finality.ValidatorWithBlsPub{
+			Address:      common.BigToAddress(big.NewInt(int64(i))),
+			BlsPublicKey: secretKey[i].PublicKey(),
+		}
+	}
+
+	blockNumber := uint64(0)
+	blockHash := common.Hash{0x1}
+	vote := types.VoteData{
+		TargetNumber: blockNumber,
+		TargetHash:   blockHash,
+	}
+
+	digest := vote.Hash()
+	signature := make([]blsCommon.Signature, numValidator+1)
+	for i := 0; i < len(signature); i++ {
+		signature[i] = secretKey[i].Sign(digest[:])
+	}
+
+	snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, nil, valWithBlsPub, nil)
+	recents, _ := lru.NewARC(inmemorySnapshots)
+	c := Consortium{
+		chainConfig: &params.ChainConfig{
+			ShillinBlock: big.NewInt(0),
+		},
+		config: &params.ConsortiumConfig{
+			EpochV2: 300,
+		},
+		recents: recents,
+	}
+	snap.Hash = blockHash
+	c.recents.Add(snap.Hash, snap)
+
+	var votedBitSet finality.FinalityVoteBitSet
+	votedBitSet.SetBit(0)
+	err = c.verifyFinalitySignatures(nil, votedBitSet, nil, blockNumber, blockHash, nil)
+	if !errors.Is(err, finality.ErrNotEnoughFinalityVote) {
+		t.Errorf("Expect error %v have %v", finality.ErrNotEnoughFinalityVote, err)
+	}
+
+	votedBitSet = finality.FinalityVoteBitSet(0)
+	votedBitSet.SetBit(0)
+	votedBitSet.SetBit(1)
+	votedBitSet.SetBit(3)
+	err = c.verifyFinalitySignatures(nil, votedBitSet, nil, 0, snap.Hash, nil)
+	if !errors.Is(err, finality.ErrInvalidFinalityVotedBitSet) {
+		t.Errorf("Expect error %v have %v", finality.ErrInvalidFinalityVotedBitSet, err)
+	}
+
+	votedBitSet = finality.FinalityVoteBitSet(0)
+	votedBitSet.SetBit(0)
+	votedBitSet.SetBit(1)
+	votedBitSet.SetBit(2)
+	aggregatedSignature := blst.AggregateSignatures([]blsCommon.Signature{
+		signature[0],
+		signature[1],
+		signature[3],
+	})
+	err = c.verifyFinalitySignatures(nil, votedBitSet, aggregatedSignature, 0, snap.Hash, nil)
+	if !errors.Is(err, finality.ErrFinalitySignatureVerificationFailed) {
+		t.Errorf("Expect error %v have %v", finality.ErrFinalitySignatureVerificationFailed, err)
+	}
+
+	votedBitSet = finality.FinalityVoteBitSet(0)
+	votedBitSet.SetBit(0)
+	votedBitSet.SetBit(1)
+	votedBitSet.SetBit(2)
+	aggregatedSignature = blst.AggregateSignatures([]blsCommon.Signature{
+		signature[0],
+		signature[1],
+		signature[2],
+		signature[3],
+	})
+	err = c.verifyFinalitySignatures(nil, votedBitSet, aggregatedSignature, 0, snap.Hash, nil)
+	if !errors.Is(err, finality.ErrFinalitySignatureVerificationFailed) {
+		t.Errorf("Expect error %v have %v", finality.ErrFinalitySignatureVerificationFailed, err)
+	}
+
+	votedBitSet = finality.FinalityVoteBitSet(0)
+	votedBitSet.SetBit(0)
+	votedBitSet.SetBit(1)
+	votedBitSet.SetBit(2)
+	aggregatedSignature = blst.AggregateSignatures([]blsCommon.Signature{
+		signature[0],
+		signature[1],
+		signature[2],
+	})
+	err = c.verifyFinalitySignatures(nil, votedBitSet, aggregatedSignature, 0, snap.Hash, nil)
+	if err != nil {
+		t.Errorf("Expect successful verification have %v", err)
+	}
+}
+
+func TestSnapshotValidatorWithBlsKey(t *testing.T) {
+	secretKey, err := blst.RandKey()
+	if err != nil {
+		t.Fatalf("Failed to generate secret key, err: %s", err)
+	}
+
+	validators := []finality.ValidatorWithBlsPub{
+		{
+			Address:      common.Address{0x1},
+			BlsPublicKey: secretKey.PublicKey(),
+		},
+	}
+	snap := newSnapshot(nil, nil, nil, 10, common.Hash{0x2}, nil, validators, nil)
+	db := rawdb.NewMemoryDatabase()
+	err = snap.store(db)
+	if err != nil {
+		t.Fatalf("Failed to store snapshot, err: %s", err)
+	}
+
+	savedSnap, err := loadSnapshot(nil, nil, db, common.Hash{0x2}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to load snapshot, err: %s", err)
+	}
+
+	savedValidators := savedSnap.ValidatorsWithBlsPub
+	if len(savedValidators) != len(validators) {
+		t.Fatalf("Saved snapshot is corrupted")
+	}
+
+	for i := range validators {
+		if validators[i].Address != savedValidators[i].Address {
+			t.Fatalf("Saved snapshot is corrupted")
+		}
+
+		if !validators[i].BlsPublicKey.Equals(savedValidators[i].BlsPublicKey) {
+			t.Fatalf("Saved snapshot is corrupted")
+		}
+	}
+}
+
+type mockContract struct {
+	validators map[common.Address]blsCommon.PublicKey
+}
+
+func (contract *mockContract) WrapUpEpoch(opts *consortiumCommon.ApplyTransactOpts) error {
+	return nil
+}
+
+func (contract *mockContract) SubmitBlockReward(opts *consortiumCommon.ApplyTransactOpts) error {
+	return nil
+}
+
+func (contract *mockContract) Slash(opts *consortiumCommon.ApplyTransactOpts, spoiledValidator common.Address) error {
+	return nil
+}
+
+func (contract *mockContract) FinalityReward(opts *consortiumCommon.ApplyTransactOpts, votedValidators []common.Address) error {
+	return nil
+}
+
+func (contract *mockContract) GetValidators(*big.Int) ([]common.Address, error) {
+	var validatorAddresses []common.Address
+	for address := range contract.validators {
+		validatorAddresses = append(validatorAddresses, address)
+	}
+	return validatorAddresses, nil
+}
+
+func (contract *mockContract) GetBlsPublicKey(_ *big.Int, address common.Address) (blsCommon.PublicKey, error) {
+	if key, ok := contract.validators[address]; ok {
+		if key != nil {
+			return key, nil
+		} else {
+			return nil, errors.New("no BLS public key found")
+		}
+	} else {
+		return nil, errors.New("address is not a validator")
+	}
+}
+
+func TestGetCheckpointValidatorFromContract(t *testing.T) {
+	var err error
+	secretKeys := make([]blsCommon.SecretKey, 3)
+	for i := 0; i < len(secretKeys); i++ {
+		secretKeys[i], err = blst.RandKey()
+		if err != nil {
+			t.Fatalf("Failed to generate secret key, err: %s", err)
+		}
+	}
+
+	mock := &mockContract{
+		validators: map[common.Address]blsCommon.PublicKey{
+			common.Address{0x1}: secretKeys[1].PublicKey(),
+			common.Address{0x2}: nil,
+			common.Address{0x5}: secretKeys[0].PublicKey(),
+			common.Address{0x3}: secretKeys[2].PublicKey(),
+		},
+	}
+	c := Consortium{
+		chainConfig: &params.ChainConfig{
+			ShillinBlock: big.NewInt(0),
+		},
+		contract: mock,
+	}
+
+	validatorWithPubs, err := c.getCheckpointValidatorsFromContract(&types.Header{Number: big.NewInt(3)})
+	if err != nil {
+		t.Fatalf("Failed to get checkpoint validators from contract, err: %s", err)
+	}
+
+	if len(validatorWithPubs) != 3 {
+		t.Fatalf("Expect returned list, length: %d have: %d", 3, len(validatorWithPubs))
+	}
+	if validatorWithPubs[0].Address != (common.Address{0x1}) {
+		t.Fatalf("Wrong returned list")
+	}
+	if !validatorWithPubs[0].BlsPublicKey.Equals(secretKeys[1].PublicKey()) {
+		t.Fatalf("Wrong returned list")
+	}
+	if validatorWithPubs[1].Address != (common.Address{0x3}) {
+		t.Fatalf("Wrong returned list")
+	}
+	if !validatorWithPubs[1].BlsPublicKey.Equals(secretKeys[2].PublicKey()) {
+		t.Fatalf("Wrong returned list")
+	}
+	if validatorWithPubs[2].Address != (common.Address{0x5}) {
+		t.Fatalf("Wrong returned list")
+	}
+	if !validatorWithPubs[2].BlsPublicKey.Equals(secretKeys[0].PublicKey()) {
+		t.Fatalf("Wrong returned list")
+	}
+}
+
+type mockVotePool struct {
+	vote []*types.VoteEnvelope
+}
+
+func (votePool *mockVotePool) FetchVoteByBlockHash(hash common.Hash) []*types.VoteEnvelope {
+	return votePool.vote
+}
+
+func TestAssembleFinalityVote(t *testing.T) {
+	var err error
+	secretKeys := make([]blsCommon.SecretKey, 10)
+	for i := 0; i < len(secretKeys); i++ {
+		secretKeys[i], err = blst.RandKey()
+		if err != nil {
+			t.Fatalf("Failed to generate secret key, err: %s", err)
+		}
+	}
+
+	voteData := types.VoteData{
+		TargetNumber: 4,
+		TargetHash:   common.Hash{0x1},
+	}
+	digest := voteData.Hash()
+
+	signatures := make([]blsCommon.Signature, 10)
+	for i := 0; i < len(signatures); i++ {
+		signatures[i] = secretKeys[i].Sign(digest[:])
+	}
+
+	var votes []*types.VoteEnvelope
+	for i := 0; i < 10; i++ {
+		votes = append(votes, &types.VoteEnvelope{
+			RawVoteEnvelope: types.RawVoteEnvelope{
+				PublicKey: types.BLSPublicKey(secretKeys[i].PublicKey().Marshal()),
+				Signature: types.BLSSignature(signatures[i].Marshal()),
+				Data:      &voteData,
+			},
+		})
+	}
+
+	mock := mockVotePool{
+		vote: votes,
+	}
+	c := Consortium{
+		chainConfig: &params.ChainConfig{
+			ShillinBlock: big.NewInt(0),
+		},
+		votePool: &mock,
+	}
+
+	var validators []finality.ValidatorWithBlsPub
+	for i := 0; i < 9; i++ {
+		validators = append(validators, finality.ValidatorWithBlsPub{
+			Address:      common.BigToAddress(big.NewInt(int64(i))),
+			BlsPublicKey: secretKeys[i].PublicKey(),
+		})
+	}
+
+	snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, nil, validators, nil)
+
+	header := types.Header{Number: big.NewInt(5)}
+	extraData := &finality.HeaderExtraData{}
+	header.Extra = extraData.Encode(true)
+	c.assembleFinalityVote(&header, snap)
+
+	extraData, err = finality.DecodeExtra(header.Extra, true)
+	if err != nil {
+		t.Fatalf("Failed to decode extra data, err: %s", err)
+	}
+
+	if extraData.HasFinalityVote != 1 {
+		t.Fatal("Missing finality vote in header")
+	}
+
+	bitSet := finality.FinalityVoteBitSet(0)
+	for i := 0; i < 9; i++ {
+		bitSet.SetBit(i)
+	}
+
+	if uint64(bitSet) != uint64(extraData.FinalityVotedValidators) {
+		t.Fatalf(
+			"Mismatch voted validator, expect %d have %d",
+			uint64(bitSet),
+			uint64(extraData.FinalityVotedValidators),
+		)
+	}
+
+	var includedSignatures []blsCommon.Signature
+	for i := 0; i < 9; i++ {
+		includedSignatures = append(includedSignatures, signatures[i])
+	}
+
+	aggregatedSignature := blst.AggregateSignatures(includedSignatures)
+
+	if !bytes.Equal(aggregatedSignature.Marshal(), extraData.AggregatedFinalityVotes.Marshal()) {
+		t.Fatal("Mismatch signature")
+	}
+}
+
+func TestVerifyVote(t *testing.T) {
+	const numValidator = 3
+	var err error
+
+	secretKey := make([]blsCommon.SecretKey, numValidator+1)
+	for i := 0; i < len(secretKey); i++ {
+		secretKey[i], err = blst.RandKey()
+		if err != nil {
+			t.Fatalf("Failed to generate secret key, err %s", err)
+		}
+	}
+
+	valWithBlsPub := make([]finality.ValidatorWithBlsPub, numValidator)
+	for i := 0; i < len(valWithBlsPub); i++ {
+		valWithBlsPub[i] = finality.ValidatorWithBlsPub{
+			Address:      common.BigToAddress(big.NewInt(int64(i))),
+			BlsPublicKey: secretKey[i].PublicKey(),
+		}
+	}
+
+	db := rawdb.NewMemoryDatabase()
+	genesis := (&core.Genesis{
+		Config:  params.TestChainConfig,
+		BaseFee: big.NewInt(params.InitialBaseFee),
+	}).MustCommit(db)
+	chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+
+	bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true)
+	if _, err := chain.InsertChain(bs[:]); err != nil {
+		panic(err)
+	}
+
+	snap := newSnapshot(nil, nil, nil, 10, common.Hash{}, nil, valWithBlsPub, nil)
+	recents, _ := lru.NewARC(inmemorySnapshots)
+	c := Consortium{
+		chainConfig: &params.ChainConfig{
+			ShillinBlock: big.NewInt(0),
+		},
+		config: &params.ConsortiumConfig{
+			EpochV2: 300,
+		},
+		recents: recents,
+	}
+	snap.Hash = bs[0].Hash()
+	c.recents.Add(snap.Hash, snap)
+
+	// invalid vote number
+	voteData := types.VoteData{
+		TargetNumber: 2,
+		TargetHash:   bs[0].Hash(),
+	}
+	signature := secretKey[0].Sign(voteData.Hash().Bytes())
+
+	vote := types.VoteEnvelope{
+		RawVoteEnvelope: types.RawVoteEnvelope{
+			PublicKey: types.BLSPublicKey(secretKey[0].PublicKey().Marshal()),
+			Signature: types.BLSSignature(signature.Marshal()),
+			Data:      &voteData,
+		},
+	}
+
+	err = c.VerifyVote(chain, &vote)
+	if !errors.Is(err, finality.ErrInvalidTargetNumber) {
+		t.Errorf("Expect error %v have %v", finality.ErrInvalidTargetNumber, err)
+	}
+
+	// invalid public key
+	voteData = types.VoteData{
+		TargetNumber: 1,
+		TargetHash:   bs[0].Hash(),
+	}
+	signature = secretKey[numValidator].Sign(voteData.Hash().Bytes())
+
+	vote = types.VoteEnvelope{
+		RawVoteEnvelope: types.RawVoteEnvelope{
+			PublicKey: types.BLSPublicKey(secretKey[numValidator].PublicKey().Marshal()),
+			Signature: types.BLSSignature(signature.Marshal()),
+			Data:      &voteData,
+		},
+	}
+
+	err = c.VerifyVote(chain, &vote)
+	if !errors.Is(err, finality.ErrUnauthorizedFinalityVoter) {
+		t.Errorf("Expect error %v have %v", finality.ErrUnauthorizedFinalityVoter, err)
+	}
+
+	// successful case
+	voteData = types.VoteData{
+		TargetNumber: 1,
+		TargetHash:   bs[0].Hash(),
+	}
+	signature = secretKey[0].Sign(voteData.Hash().Bytes())
+
+	vote = types.VoteEnvelope{
+		RawVoteEnvelope: types.RawVoteEnvelope{
+			PublicKey: types.BLSPublicKey(secretKey[0].PublicKey().Marshal()),
+			Signature: types.BLSSignature(signature.Marshal()),
+			Data:      &voteData,
+		},
+	}
+
+	err = c.VerifyVote(chain, &vote)
+	if err != nil {
+		t.Errorf("Expect successful verification have %s", err)
+	}
+}
diff --git a/consensus/consortium/v2/finality/consortium_header.go b/consensus/consortium/v2/finality/consortium_header.go
new file mode 100644
index 0000000000..e19e85ed15
--- /dev/null
+++ b/consensus/consortium/v2/finality/consortium_header.go
@@ -0,0 +1,292 @@
+package finality
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/bls/blst"
+	blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+const (
+	ExtraSeal   = crypto.SignatureLength
+	ExtraVanity = 32
+)
+
+var (
+	// ErrInvalidHasFinalityVote is returned if a block's extra-data contains invalid
+	// has finality vote byte
+	ErrInvalidHasFinalityVote = errors.New("invalid has finality vote byte")
+
+	// ErrMissingHasFinalityVote is returned if a block's extra-data section does not seem
+	// to include 1 byte to determine if the extra data has the finality votes
+	ErrMissingHasFinalityVote = errors.New("extra-data 1 byte has finality votes missing")
+
+	// ErrMissingFinalityVoteBitSet is returned if a block's extra-data section does not seem
+	// to include 8 bytes of finality vote bitset
+	ErrMissingFinalityVoteBitSet = errors.New("extra-data 8 bytes finality votes bitset missing")
+
+	// ErrMissingFinalitySignature is returned if a block's extra-data section does not seem
+	// to include finality signature
+	ErrMissingFinalitySignature = errors.New("extra-data finality signature missing")
+
+	// ErrNotEnoughFinalityVote is returned if the number of finality votes is under
+	// the threshold
+	ErrNotEnoughFinalityVote = errors.New("not enough finality vote")
+
+	// ErrFinalitySignatureVerificationFailed is returned if the finality signature verification
+	// failed
+	ErrFinalitySignatureVerificationFailed = errors.New("failed to verify finality signature")
+
+	// ErrInvalidFinalityVotedBitSet is returned if the voted validator in bit set is not in
+	// snapshot validator set
+	ErrInvalidFinalityVotedBitSet = errors.New("invalid finality voted bit set")
+
+	// ErrUnauthorizedFinalityVoter is returned if finality voter is not in validator set
+	ErrUnauthorizedFinalityVoter = errors.New("unauthorized finality voter")
+
+	// ErrMissingVanity is returned if a block's extra-data section is shorter than
+	// 32 bytes, which is required to store the signer vanity.
+	ErrMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
+
+	// ErrMissingSignature is returned if a block's extra-data section doesn't seem
+	// to contain a 65 byte secp256k1 signature.
+	ErrMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
+
+	// ErrInvalidSpanValidators is returned if a block contains an
+	// invalid list of validators (i.e. not divisible by 20 bytes).
+	ErrInvalidSpanValidators = errors.New("invalid validator list on sprint end block")
+
+	// ErrInvalidTargetNumber is returned if the vote contains invalid
+	// target number
+	ErrInvalidTargetNumber = errors.New("invalid target number in vote")
+)
+
+type ValidatorWithBlsPub struct {
+	Address      common.Address
+	BlsPublicKey blsCommon.PublicKey
+}
+
+type savedValidatorWithBlsPub struct {
+	Address      common.Address `json:"address"`
+	BlsPublicKey string         `json:"blsPublicKey,omitempty"`
+}
+
+func (validator *ValidatorWithBlsPub) UnmarshalJSON(input []byte) error {
+	var (
+		savedValidator savedValidatorWithBlsPub
+		err            error
+	)
+
+	if err = json.Unmarshal(input, &savedValidator); err != nil {
+		return err
+	}
+
+	validator.Address = savedValidator.Address
+	rawPublicKey, err := hex.DecodeString(savedValidator.BlsPublicKey)
+	if err != nil {
+		return err
+	}
+	validator.BlsPublicKey, err = blst.PublicKeyFromBytes(rawPublicKey)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (validator *ValidatorWithBlsPub) MarshalJSON() ([]byte, error) {
+	savedValidator := savedValidatorWithBlsPub{
+		Address: validator.Address,
+	}
+
+	if validator.BlsPublicKey != nil {
+		savedValidator.BlsPublicKey = hex.EncodeToString(validator.BlsPublicKey.Marshal())
+	}
+
+	return json.Marshal(&savedValidator)
+}
+
+// CheckpointValidatorAscending implements the sort interface to allow sorting a list
+// of checkpoint validators
+type CheckpointValidatorAscending []ValidatorWithBlsPub
+
+func (validator CheckpointValidatorAscending) Len() int { return len(validator) }
+func (validator CheckpointValidatorAscending) Less(i, j int) bool {
+	return bytes.Compare(validator[i].Address[:], validator[j].Address[:]) < 0
+}
+func (validator CheckpointValidatorAscending) Swap(i, j int) {
+	validator[i], validator[j] = validator[j], validator[i]
+}
+
+type FinalityVoteBitSet uint64
+
+const finalityVoteBitSetByteLength int = 8
+
+func (bitSet *FinalityVoteBitSet) Indices() []int {
+	var votedValidatorPositions []int
+
+	for i := 0; i < finalityVoteBitSetByteLength*8; i++ {
+		if uint64(*bitSet)&(1<<i) != 0 {
+			votedValidatorPositions = append(votedValidatorPositions, i)
+		}
+	}
+	return votedValidatorPositions
+}
+
+func (bitSet *FinalityVoteBitSet) SetBit(index int) {
+	if index >= finalityVoteBitSetByteLength*8 {
+		return
+	}
+
+	*bitSet = FinalityVoteBitSet(uint64(*bitSet) | (1 << index))
+}
+
+// HeaderExtraData represents the information in the extra data of header,
+// this helps to make the code more readable
+type HeaderExtraData struct {
+	Vanity                  [ExtraVanity]byte     // unused in Consortium, filled with zero
+	HasFinalityVote         uint8                 // determine if the header extra has the finality vote
+	FinalityVotedValidators FinalityVoteBitSet    // the bit set of validators that vote for finality
+	AggregatedFinalityVotes blsCommon.Signature   // aggregated BLS signatures for finality vote
+	CheckpointValidators    []ValidatorWithBlsPub // validator addresses and BLS public key appended at checkpoint block
+	Seal                    [ExtraSeal]byte       // the sealing block signature
+}
+
+func (extraData *HeaderExtraData) Encode(isShillin bool) []byte {
+	var rawBytes []byte
+
+	rawBytes = append(rawBytes, extraData.Vanity[:]...)
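+	// The remaining fields follow the layout that DecodeExtra expects: a 1-byte
+	// has-finality-vote flag (Shillin only), then an 8-byte little-endian finality
+	// vote bitset and the aggregated BLS signature (only when the flag is 1),
+	// then the checkpoint validators (address, plus BLS public key after Shillin),
+	// and finally the ExtraSeal-byte seal.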
+ if isShillin { + rawBytes = append(rawBytes, extraData.HasFinalityVote) + if extraData.HasFinalityVote == 1 { + rawBytes = binary.LittleEndian.AppendUint64(rawBytes, uint64(extraData.FinalityVotedValidators)) + rawBytes = append(rawBytes, extraData.AggregatedFinalityVotes.Marshal()...) + } + } + for _, validator := range extraData.CheckpointValidators { + rawBytes = append(rawBytes, validator.Address.Bytes()...) + if isShillin { + rawBytes = append(rawBytes, validator.BlsPublicKey.Marshal()...) + } + } + rawBytes = append(rawBytes, extraData.Seal[:]...) + + return rawBytes +} + +func DecodeExtra(rawBytes []byte, isShillin bool) (*HeaderExtraData, error) { + var ( + extraData HeaderExtraData + currentPosition int + err error + ) + + rawBytesLength := len(rawBytes) + if rawBytesLength < ExtraVanity { + return nil, ErrMissingVanity + } + + copy(extraData.Vanity[:], rawBytes[:ExtraVanity]) + currentPosition += ExtraVanity + + if isShillin { + if rawBytesLength-currentPosition < 1 { + return nil, ErrMissingHasFinalityVote + } + + extraData.HasFinalityVote = rawBytes[currentPosition] + currentPosition += 1 + + if extraData.HasFinalityVote != 1 && extraData.HasFinalityVote != 0 { + return nil, ErrInvalidHasFinalityVote + } + + if extraData.HasFinalityVote == 1 { + if rawBytesLength-currentPosition < finalityVoteBitSetByteLength { + return nil, ErrMissingFinalityVoteBitSet + } + extraData.FinalityVotedValidators = FinalityVoteBitSet( + binary.LittleEndian.Uint64(rawBytes[currentPosition : currentPosition+finalityVoteBitSetByteLength]), + ) + currentPosition += finalityVoteBitSetByteLength + + if rawBytesLength-currentPosition < params.BLSSignatureLength { + return nil, ErrMissingFinalitySignature + } + extraData.AggregatedFinalityVotes, err = blst.SignatureFromBytes( + rawBytes[currentPosition : currentPosition+params.BLSSignatureLength], + ) + if err != nil { + return nil, err + } + currentPosition += params.BLSSignatureLength + } + } + + if rawBytesLength-currentPosition < ExtraSeal { + return nil, ErrMissingSignature + } + + checkpointValidatorsLength := rawBytesLength - currentPosition - ExtraSeal + extraData.CheckpointValidators, err = ParseCheckpointData( + rawBytes[currentPosition:currentPosition+checkpointValidatorsLength], + isShillin, + ) + if err != nil { + return nil, err + } + currentPosition += checkpointValidatorsLength + + copy(extraData.Seal[:], rawBytes[currentPosition:]) + + return &extraData, nil +} + +// ParseCheckpointData retrieves the list of validator addresses and finality voter's public keys +// at the checkpoint block +func ParseCheckpointData(checkpointData []byte, isShillin bool) ([]ValidatorWithBlsPub, error) { + var ( + lengthPerValidator int + extraData []ValidatorWithBlsPub + currentPosition int + err error + ) + + if isShillin { + lengthPerValidator = common.AddressLength + params.BLSPubkeyLength + } else { + lengthPerValidator = common.AddressLength + } + + if len(checkpointData)%lengthPerValidator != 0 { + return nil, ErrInvalidSpanValidators + } + + numValidators := len(checkpointData) / lengthPerValidator + extraData = make([]ValidatorWithBlsPub, numValidators) + for i := 0; i < numValidators; i++ { + copy( + extraData[i].Address[:], + checkpointData[currentPosition:currentPosition+common.AddressLength], + ) + currentPosition += common.AddressLength + + if isShillin { + extraData[i].BlsPublicKey, err = blst.PublicKeyFromBytes( + checkpointData[currentPosition : currentPosition+params.BLSPubkeyLength], + ) + if err != nil { + return nil, err + } + 
currentPosition += params.BLSPubkeyLength + } + } + + return extraData, nil +} diff --git a/consensus/consortium/v2/snapshot.go b/consensus/consortium/v2/snapshot.go index e14c3386eb..87aa75a406 100644 --- a/consensus/consortium/v2/snapshot.go +++ b/consensus/consortium/v2/snapshot.go @@ -3,14 +3,15 @@ package v2 import ( "bytes" "encoding/json" - "errors" "math/big" "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" v1 "github.com/ethereum/go-ethereum/consensus/consortium/v1" + "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality" "github.com/ethereum/go-ethereum/core/types" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/params" @@ -24,10 +25,15 @@ type Snapshot struct { ethAPI *ethapi.PublicBlockChainAPI sigCache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover - Number uint64 `json:"number"` // Block number where the snapshot was created - Hash common.Hash `json:"hash"` // Block hash where the snapshot was created - Validators map[common.Address]struct{} `json:"validators"` // Set of authorized validators at this moment - Recents map[uint64]common.Address `json:"recents"` // Set of recent validators for spam protections + Number uint64 `json:"number"` // Block number where the snapshot was created + Hash common.Hash `json:"hash"` // Block hash where the snapshot was created + Validators map[common.Address]struct{} `json:"validators,omitempty"` // Set of authorized validators at this moment before Shillin + Recents map[uint64]common.Address `json:"recents"` // Set of recent validators for spam protections + + // Finality additional fields + ValidatorsWithBlsPub []finality.ValidatorWithBlsPub `json:"validatorWithBlsPub,omitempty"` // Array of sorted authorized validators and BLS public keys after Shillin + JustifiedBlockNumber uint64 `json:"justifiedBlockNumber,omitempty"` // The justified block number + JustifiedBlockHash common.Hash `json:"justifiedBlockHash,omitempty"` // The justified block hash } // validatorsAscending implements the sort interface to allow sorting a list of addresses @@ -40,7 +46,16 @@ func (s validatorsAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // newSnapshot creates a new snapshot with the specified startup parameters. This // method does not initialize the set of recent validators, so only ever use if for // the genesis block -func newSnapshot(chainConfig *params.ChainConfig, config *params.ConsortiumConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, validators []common.Address, ethAPI *ethapi.PublicBlockChainAPI) *Snapshot { +func newSnapshot( + chainConfig *params.ChainConfig, + config *params.ConsortiumConfig, + sigcache *lru.ARCCache, + number uint64, + hash common.Hash, + validators []common.Address, + valWithBlsPub []finality.ValidatorWithBlsPub, + ethAPI *ethapi.PublicBlockChainAPI, +) *Snapshot { snap := &Snapshot{ chainConfig: chainConfig, config: config, @@ -51,9 +66,14 @@ func newSnapshot(chainConfig *params.ChainConfig, config *params.ConsortiumConfi Recents: make(map[uint64]common.Address), Validators: make(map[common.Address]struct{}), } + for _, v := range validators { snap.Validators[v] = struct{}{} } + + if valWithBlsPub != nil { + snap.ValidatorsWithBlsPub = valWithBlsPub + } return snap } @@ -94,19 +114,29 @@ func (s *Snapshot) store(db ethdb.Database) error { // copy creates a deep copy of the snapshot. 
func (s *Snapshot) copy() *Snapshot { cpy := &Snapshot{ - chainConfig: s.chainConfig, - config: s.config, - ethAPI: s.ethAPI, - sigCache: s.sigCache, - Number: s.Number, - Hash: s.Hash, - Validators: make(map[common.Address]struct{}), - Recents: make(map[uint64]common.Address), + chainConfig: s.chainConfig, + config: s.config, + ethAPI: s.ethAPI, + sigCache: s.sigCache, + Number: s.Number, + Hash: s.Hash, + Recents: make(map[uint64]common.Address), + JustifiedBlockNumber: s.JustifiedBlockNumber, + JustifiedBlockHash: s.JustifiedBlockHash, } - for v := range s.Validators { - cpy.Validators[v] = struct{}{} + if s.Validators != nil { + cpy.Validators = make(map[common.Address]struct{}) + for v := range s.Validators { + cpy.Validators[v] = struct{}{} + } + } + + if s.ValidatorsWithBlsPub != nil { + cpy.ValidatorsWithBlsPub = make([]finality.ValidatorWithBlsPub, len(s.ValidatorsWithBlsPub)) + copy(cpy.ValidatorsWithBlsPub, s.ValidatorsWithBlsPub) } + for block, v := range s.Recents { cpy.Recents[block] = v } @@ -143,7 +173,7 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea for _, header := range headers { number := header.Number.Uint64() // Delete the oldest validators from the recent list to allow it signing again - if limit := uint64(len(snap.Validators)/2 + 1); number >= limit { + if limit := uint64(len(snap.validators())/2 + 1); number >= limit { delete(snap.Recents, number-limit) } // Resolve the authorization key and check against signers @@ -161,7 +191,7 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea if err != nil { return nil, err } - if _, ok := snap.Validators[validator]; !ok { + if !snap.inInValidatorSet(validator) { return nil, errUnauthorizedValidator } for _, recent := range snap.Recents { @@ -170,32 +200,61 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea } } snap.Recents[number] = validator + + if chain.Config().IsShillin(header.Number) { + extraData, err := finality.DecodeExtra(header.Extra, true) + if err != nil { + return nil, err + } + // When getting here, the header may not go through the verification yet, + // so the finality votes may not be verified. Later, when the header + // verification happens, this header may be rejected, the only impact is + // if the snapshot is at checkpoint, the garbage snapshot is stored to + // disk. Because we already check whether the sealer is in validator set + // already and the impact is not high, we simply trust the finality vote + // here without verification. 
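+			// A header carrying a finality vote justifies its direct parent, so the
+			// parent is recorded as the latest justified block in the snapshot.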
+ if extraData.HasFinalityVote == 1 { + snap.JustifiedBlockNumber = header.Number.Uint64() - 1 + snap.JustifiedBlockHash = header.ParentHash + } + } + // Change the validator set base on the size of the validators set - if number > 0 && number%s.config.EpochV2 == uint64(len(snap.Validators)/2) { + if number > 0 && number%s.config.EpochV2 == uint64(len(snap.validators())/2) { // Get the most recent checkpoint header - checkpointHeader := FindAncientHeader(header, uint64(len(snap.Validators)/2), chain, parents) + checkpointHeader := FindAncientHeader(header, uint64(len(snap.validators())/2), chain, parents) if checkpointHeader == nil { return nil, consensus.ErrUnknownAncestor } - validatorBytes := checkpointHeader.Extra[extraVanity : len(checkpointHeader.Extra)-extraSeal] + isShillin := chain.Config().IsShillin(checkpointHeader.Number) // Get validator set from headers and use that for new validator set - newValArr, err := ParseValidators(validatorBytes) + extraData, err := finality.DecodeExtra(checkpointHeader.Extra, isShillin) if err != nil { return nil, err } - newVals := make(map[common.Address]struct{}, len(newValArr)) - for _, val := range newValArr { - newVals[val] = struct{}{} - } - oldLimit := len(snap.Validators)/2 + 1 - newLimit := len(newVals)/2 + 1 + + oldLimit := len(snap.validators())/2 + 1 + newLimit := len(extraData.CheckpointValidators)/2 + 1 if newLimit < oldLimit { for i := 0; i < oldLimit-newLimit; i++ { delete(snap.Recents, number-uint64(newLimit)-uint64(i)) } } - snap.Validators = newVals + + if isShillin { + // The validator information in checkpoint header is already sorted, + // we don't need to sort here + snap.ValidatorsWithBlsPub = make([]finality.ValidatorWithBlsPub, len(extraData.CheckpointValidators)) + copy(snap.ValidatorsWithBlsPub, extraData.CheckpointValidators) + snap.Validators = nil + } else { + snap.Validators = make(map[common.Address]struct{}) + for _, validator := range extraData.CheckpointValidators { + snap.Validators[validator.Address] = struct{}{} + } + snap.ValidatorsWithBlsPub = nil + } } } snap.Number += uint64(len(headers)) @@ -205,12 +264,42 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea // validators retrieves the list of validators in ascending order. 
func (s *Snapshot) validators() []common.Address { - validators := make([]common.Address, 0, len(s.Validators)) - for v := range s.Validators { - validators = append(validators, v) + if s.Validators != nil { + validators := make([]common.Address, 0, len(s.Validators)) + for v := range s.Validators { + validators = append(validators, v) + } + sort.Sort(validatorsAscending(validators)) + return validators + } else { + // After the Shillin the array of validators in snapshot is + // guaranteed to be sorted so we don't need to sort here + addresses := make([]common.Address, len(s.ValidatorsWithBlsPub)) + for i, validator := range s.ValidatorsWithBlsPub { + addresses[i] = validator.Address + } + return addresses + } +} + +func (s *Snapshot) inInValidatorSet(address common.Address) bool { + validatorSet := s.validators() + for _, validator := range validatorSet { + if validator == address { + return true + } } - sort.Sort(validatorsAscending(validators)) - return validators + return false +} + +func (s *Snapshot) inBlsPublicKeySet(publicKey blsCommon.PublicKey) bool { + for _, validator := range s.ValidatorsWithBlsPub { + if validator.BlsPublicKey.Equals(publicKey) { + return true + } + } + + return false } // inturn returns if a validator at a given block height is in-turn or not. @@ -253,7 +342,7 @@ func (s *Snapshot) supposeValidator() common.Address { func (s *Snapshot) IsRecentlySigned(validator common.Address) bool { for seen, recent := range s.Recents { if recent == validator { - if limit := uint64(len(s.Validators)/2 + 1); seen > s.Number+1-limit { + if limit := uint64(len(s.validators())/2 + 1); seen > s.Number+1-limit { return true } } @@ -261,21 +350,6 @@ func (s *Snapshot) IsRecentlySigned(validator common.Address) bool { return false } -// ParseValidators retrieves the list of validators -func ParseValidators(validatorsBytes []byte) ([]common.Address, error) { - if len(validatorsBytes)%validatorBytesLength != 0 { - return nil, errors.New("invalid validators bytes") - } - n := len(validatorsBytes) / validatorBytesLength - result := make([]common.Address, n) - for i := 0; i < n; i++ { - address := make([]byte, validatorBytesLength) - copy(address, validatorsBytes[i*validatorBytesLength:(i+1)*validatorBytesLength]) - result[i] = common.BytesToAddress(address) - } - return result, nil -} - // FindAncientHeader finds the most recent checkpoint header // Travel through the candidateParents to find the ancient header. 
// If all headers in candidateParents have the number is larger than the header number, diff --git a/console/console_test.go b/console/console_test.go index 71c80c20fa..50bb82c531 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -289,7 +289,7 @@ func TestPrettyError(t *testing.T) { defer tester.Close(t) tester.console.Evaluate("throw 'hello'") - want := jsre.ErrorColor("hello") + "\n\tat :1:7(1)\n\n" + want := jsre.ErrorColor("hello") + "\n\tat :1:1(1)\n\n" if output := tester.output.String(); output != want { t.Fatalf("pretty error mismatch: have %s, want %s", output, want) } diff --git a/core/blockchain.go b/core/blockchain.go index 676d8da8fd..053dd464f7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -465,6 +465,50 @@ func (bc *BlockChain) StartDoubleSignMonitor() { } } +func (bc *BlockChain) StartFinalityVoteMonitor() { + log.Info("Starting finality vote monitor") + + consensus, ok := bc.engine.(consensus.FastFinalityPoSA) + if !ok { + log.Error("Not a fast finality consensus, stop finality vote monitor") + return + } + finalityVoteMonitor, err := monitor.NewFinalityVoteMonitor(bc, consensus) + if err != nil { + log.Error("Finality vote monitor creation failed", "err", err) + return + } + + chainEventCh := make(chan ChainEvent) + chainSideEventCh := make(chan ChainSideEvent) + + chainEventSub := bc.SubscribeChainEvent(chainEventCh) + defer chainEventSub.Unsubscribe() + chainSideEventSub := bc.SubscribeChainSideEvent(chainSideEventCh) + defer chainSideEventSub.Unsubscribe() + + for { + select { + case ev := <-chainEventCh: + block := ev.Block + if bc.chainConfig.IsShillin(block.Number()) { + finalityVoteMonitor.CheckFinalityVote(block) + } + case ev := <-chainSideEventCh: + block := ev.Block + if bc.chainConfig.IsShillin(block.Number()) { + finalityVoteMonitor.CheckFinalityVote(block) + } + case <-chainEventSub.Err(): + return + case <-chainSideEventSub.Err(): + return + case <-bc.quit: + return + } + } +} + // empty returns an indicator whether the blockchain is empty. // Note, it's a special case that we connect a non-empty ancient // database with an empty node, so that we can plugin the ancient @@ -1279,6 +1323,40 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. return bc.writeBlockWithState(block, receipts, logs, internalTxs, state, emitHeadEvent) } +// reorgNeeded determines if the external chain is better than the local chain so reorg is needed +func (bc *BlockChain) reorgNeeded(localBlock *types.Block, localTd *big.Int, externBlock *types.Block, externTd *big.Int) bool { + if consensusEngine, ok := bc.engine.(consensus.FastFinalityPoSA); ok { + localJustifiedBlockNumber, _ := consensusEngine.GetJustifiedBlock(bc, localBlock.NumberU64(), localBlock.Hash()) + externJustifiedBlockNumber, _ := consensusEngine.GetJustifiedBlock(bc, externBlock.NumberU64(), externBlock.Hash()) + + if externJustifiedBlockNumber > localJustifiedBlockNumber { + return true + } else if externJustifiedBlockNumber < localJustifiedBlockNumber { + return false + } + } + + // If the total difficulty is higher than our known, add it to the canonical chain + // Second clause in the if statement reduces the vulnerability to selfish mining. + // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf + reorg := externTd.Cmp(localTd) > 0 + if !reorg && externTd.Cmp(localTd) == 0 { + // Split same-difficulty blocks by number, then preferentially select + // the block generated by the local miner as the canonical block. 
+ if externBlock.NumberU64() < localBlock.NumberU64() { + reorg = true + } else if externBlock.NumberU64() == localBlock.NumberU64() { + var localPreserve, externPreserve bool + if bc.shouldPreserve != nil { + localPreserve, externPreserve = bc.shouldPreserve(localBlock), bc.shouldPreserve(externBlock) + } + reorg = !localPreserve && (externPreserve || mrand.Float64() < 0.5) + } + } + + return reorg +} + // writeBlockWithState writes the block and all associated state to the database, // but is expects the chain mutex to be held. func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, internalTxs []*types.InternalTransaction, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, dirtyAccounts []*types.DirtyStateAccount, err error) { @@ -1368,25 +1446,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } } } - // If the total difficulty is higher than our known, add it to the canonical chain - // Second clause in the if statement reduces the vulnerability to selfish mining. - // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf - reorg := externTd.Cmp(localTd) > 0 - currentBlock = bc.CurrentBlock() - if !reorg && externTd.Cmp(localTd) == 0 { - // Split same-difficulty blocks by number, then preferentially select - // the block generated by the local miner as the canonical block. - if block.NumberU64() < currentBlock.NumberU64() { - reorg = true - } else if block.NumberU64() == currentBlock.NumberU64() { - var currentPreserve, blockPreserve bool - if bc.shouldPreserve != nil { - currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) - } - reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) - } - } - if reorg { + if bc.reorgNeeded(currentBlock, localTd, block, externTd) { // Reorganise the chain if the parent is not the head block if block.ParentHash() != currentBlock.Hash() { log.Info("[reorg][writeBlockWithState]", diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 42120ea9ba..7ea6452e70 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -49,6 +49,18 @@ func (bc *BlockChain) CurrentFastBlock() *types.Block { return bc.currentFastBlock.Load().(*types.Block) } +func (bc *BlockChain) FinalizedBlock() *types.Block { + if consensusEngine, ok := bc.engine.(consensus.FastFinalityPoSA); ok { + currentBlock := bc.CurrentBlock() + finalizedNumber, finalizedHash := consensusEngine.GetFinalizedBlock(bc, currentBlock.NumberU64(), currentBlock.Hash()) + if finalizedNumber == 0 { + return nil + } + return rawdb.ReadBlock(bc.db, finalizedHash, finalizedNumber) + } + return nil +} + // HasHeader checks if a block header is present in the database or not, caching // it if present. func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { diff --git a/core/events.go b/core/events.go index ffd6477262..02b3756c3e 100644 --- a/core/events.go +++ b/core/events.go @@ -30,6 +30,9 @@ type NewMinedBlockEvent struct{ Block *types.Block } // RemovedLogsEvent is posted when a reorg happens type RemovedLogsEvent struct{ Logs []*types.Log } +// NewVoteEvent is posted when a batch of votes enters the vote pool. 
+type NewVoteEvent struct{ Vote *types.VoteEnvelope } + type ChainEvent struct { Block *types.Block Hash common.Hash diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 079e335fa6..cd85a0a8a9 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -138,3 +138,29 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) { log.Warn("Failed to clear unclean-shutdown marker", "err", err) } } + +// ReadHighestFinalityVote read the highest finality vote height +func ReadHighestFinalityVote(db ethdb.KeyValueReader) *uint64 { + var highestFinalityVote uint64 + + enc, _ := db.Get(highestFinalityVoteKey) + if len(enc) == 0 { + return nil + } + + if err := rlp.DecodeBytes(enc, &highestFinalityVote); err != nil { + return nil + } + return &highestFinalityVote +} + +// WriteHighestFinalityVote write the highest finality vote height +func WriteHighestFinalityVote(db ethdb.KeyValueStore, highestFinalityVote uint64) { + enc, err := rlp.EncodeToBytes(highestFinalityVote) + if err != nil { + log.Crit("Failed to encode highest finality vote", "err", err) + } + if err = db.Put(highestFinalityVoteKey, enc); err != nil { + log.Crit("Failed to store highest finality vote", "err", err) + } +} diff --git a/core/rawdb/database.go b/core/rawdb/database.go index c5af776672..f244950902 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -395,7 +395,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, - uncleanShutdownKey, badBlockKey, + uncleanShutdownKey, badBlockKey, highestFinalityVoteKey, storeInternalTxsEnabledKey, + snapshotSyncStatusKey, } { if bytes.Equal(key, meta) { metadata.Add(size) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 1ea77a5664..74fb88f63f 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -78,6 +78,9 @@ var ( // storeInternalTxsEnabledKey flags that internal transactions will be stored into db storeInternalTxsEnabledKey = []byte("storeInternalTxsEnabled") + // lastFinalityVoteKey tracks the highest finality vote + highestFinalityVoteKey = []byte("HighestFinalityVote") + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). 
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 25c3730e3f..472c125b77 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -127,6 +127,9 @@ func (p *triePrefetcher) copy() *triePrefetcher { // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { for root, fetch := range p.fetches { + if fetch == nil { + continue + } copy.fetches[root] = p.db.CopyTrie(fetch) } return copy diff --git a/core/types/vote.go b/core/types/vote.go new file mode 100644 index 0000000000..895e3ad753 --- /dev/null +++ b/core/types/vote.go @@ -0,0 +1,85 @@ +package types + +import ( + "sync/atomic" + + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum/go-ethereum/crypto/bls" + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/common" +) + +type BLSPublicKey [params.BLSPubkeyLength]byte +type BLSSignature [params.BLSSignatureLength]byte +type ValidatorsBitSet uint64 + +// VoteData represents the vote range that validator voted for fast finality. +type VoteData struct { + TargetNumber uint64 // The target block number which validator wants to vote for. + TargetHash common.Hash // The block hash of the target block. +} + +// Hash returns the hash of the vote data. +func (d *VoteData) Hash() common.Hash { return rlpHash(d) } + +// RawVoteEnvelope is VoteEnvelop without cached hash +type RawVoteEnvelope struct { + PublicKey BLSPublicKey // The BLS public key of the validator. + Signature BLSSignature // Validator's signature for the vote data. + Data *VoteData // The vote data for fast finality. +} + +// VoteEnvelope represents the vote of a single validator. +type VoteEnvelope struct { + RawVoteEnvelope + + // caches + hash atomic.Value +} + +func (v *VoteEnvelope) Raw() *RawVoteEnvelope { + return &v.RawVoteEnvelope +} + +// Hash returns the vote's hash. +func (v *VoteEnvelope) Hash() common.Hash { + if hash := v.hash.Load(); hash != nil { + return hash.(common.Hash) + } + + h := v.calcVoteHash() + v.hash.Store(h) + return h +} + +func (v *VoteEnvelope) calcVoteHash() common.Hash { + vote := struct { + PublicKey BLSPublicKey + Signature BLSSignature + Data *VoteData + }{v.PublicKey, v.Signature, v.Data} + return rlpHash(vote) +} + +func (b BLSPublicKey) Bytes() []byte { return b[:] } + +// Verify vote using BLS. 
+func (vote *VoteEnvelope) Verify() error { + blsPubKey, err := bls.PublicKeyFromBytes(vote.PublicKey[:]) + if err != nil { + return errors.Wrap(err, "convert public key from bytes to bls failed") + } + + sig, err := bls.SignatureFromBytes(vote.Signature[:]) + if err != nil { + return errors.Wrap(err, "invalid signature") + } + + voteDataHash := vote.Data.Hash() + if !sig.Verify(blsPubKey, voteDataHash[:]) { + return errors.New("verify bls signature failed.") + } + return nil +} diff --git a/core/vm/consortium_precompiled_contracts.go b/core/vm/consortium_precompiled_contracts.go index 7b1b8e987e..28f6cea120 100644 --- a/core/vm/consortium_precompiled_contracts.go +++ b/core/vm/consortium_precompiled_contracts.go @@ -16,7 +16,10 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/crypto/sha3" ) @@ -27,6 +30,7 @@ var ( consortiumVerifyHeadersAbi = `[{"outputs":[],"name":"getHeader","inputs":[{"internalType":"uint256","name":"chainId","type":"uint256"},{"internalType":"bytes32","name":"parentHash","type":"bytes32"},{"internalType":"bytes32","name":"ommersHash","type":"bytes32"},{"internalType":"address","name":"coinbase","type":"address"},{"internalType":"bytes32","name":"stateRoot","type":"bytes32"},{"internalType":"bytes32","name":"transactionsRoot","type":"bytes32"},{"internalType":"bytes32","name":"receiptsRoot","type":"bytes32"},{"internalType":"uint8[256]","name":"logsBloom","type":"uint8[256]"},{"internalType":"uint256","name":"difficulty","type":"uint256"},{"internalType":"uint256","name":"number","type":"uint256"},{"internalType":"uint64","name":"gasLimit","type":"uint64"},{"internalType":"uint64","name":"gasUsed","type":"uint64"},{"internalType":"uint64","name":"timestamp","type":"uint64"},{"internalType":"bytes","name":"extraData","type":"bytes"},{"internalType":"bytes32","name":"mixHash","type":"bytes32"},{"internalType":"uint64","name":"nonce","type":"uint64"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"consensusAddr","type":"address"},{"internalType":"bytes","name":"header1","type":"bytes"},{"internalType":"bytes","name":"header2","type":"bytes"}],"name":"validatingDoubleSignProof","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]` consortiumPickValidatorSetAbi = `[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address[]","name":"_candidates","type":"address[]"},{"internalType":"uint256[]","name":"_weights","type":"uint256[]"},{"internalType":"uint256[]","name":"_trustedWeights","type":"uint256[]"},{"internalType":"uint256","name":"_maxValidatorNumber","type":"uint256"},{"internalType":"uint256","name":"_maxPrioritizedValidatorNumber","type":"uint256"}],"name":"pickValidatorSet","outputs":[{"internalType":"address[]","name":"_validators","type":"address[]"}],"stateMutability":"view","type":"function"}]` getDoubleSignSlashingConfigsAbi = 
`[{"inputs":[],"name":"getDoubleSignSlashingConfigs","outputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]` + validateFinalityVoteProofAbi = `[{"inputs":[{"internalType":"bytes","name":"voterPublicKey","type":"bytes"},{"internalType":"uint256","name":"targetBlockNumber","type":"uint256"},{"internalType":"bytes32[2]","name":"targetBlockHash","type":"bytes32[2]"},{"internalType":"bytes[][2]","name":"listOfPublicKey","type":"bytes[][2]"},{"internalType":"bytes[2]","name":"aggregatedSignature","type":"bytes[2]"}],"name":"validateFinalityVoteProof","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]` ) const ( @@ -39,6 +43,9 @@ const ( getHeader = "getHeader" getDoubleSignSlashingConfigs = "getDoubleSignSlashingConfigs" extraVanity = 32 + + validateFinalityVoteProof = "validateFinalityVoteProof" + maxBlsPublicKeyListLength = 100 ) func PrecompiledContractsConsortium(caller ContractRef, evm *EVM) map[common.Address]PrecompiledContract { @@ -47,6 +54,7 @@ func PrecompiledContractsConsortium(caller ContractRef, evm *EVM) map[common.Add common.BytesToAddress([]byte{102}): &consortiumValidatorSorting{caller: caller, evm: evm}, common.BytesToAddress([]byte{103}): &consortiumVerifyHeaders{caller: caller, evm: evm}, common.BytesToAddress([]byte{104}): &consortiumPickValidatorSet{caller: caller, evm: evm}, + common.BytesToAddress([]byte{105}): &consortiumValidateFinalityProof{caller: caller, evm: evm}, } } @@ -521,3 +529,122 @@ func encodeSigHeader(w io.Writer, header *types.Header, chainId *big.Int) { panic("can't encode: " + err.Error()) } } + +type consortiumValidateFinalityProof struct { + caller ContractRef + evm *EVM +} + +func (contract *consortiumValidateFinalityProof) RequiredGas(input []byte) uint64 { + return params.ValidateFinalityProofGas +} + +func (contract *consortiumValidateFinalityProof) Run(input []byte) ([]byte, error) { + // These 2 fields are nil in testing only + if contract.caller != nil && contract.evm != nil { + if contract.evm.ChainConfig().ConsortiumV2Contracts == nil { + return nil, errors.New("cannot find consortium v2 contracts") + } + if !contract.evm.ChainConfig().ConsortiumV2Contracts.IsSystemContract(contract.caller.Address()) { + return nil, errors.New("unauthorized sender") + } + } + + _, method, args, err := loadMethodAndArgs(validateFinalityVoteProofAbi, input) + if err != nil { + return nil, err + } + if method.Name != validateFinalityVoteProof { + return nil, errors.New("invalid method") + } + if len(args) != 5 { + return nil, fmt.Errorf("invalid arguments, expect 5 got %d", len(args)) + } + + rawVoterPublicKey, ok := args[0].([]byte) + if !ok { + return nil, errors.New("invalid voter public key") + } + + targetBlockNumber, ok := args[1].(*big.Int) + if !ok { + return nil, errors.New("invalid target block number") + } + if !targetBlockNumber.IsUint64() { + return nil, errors.New("malformed target block number") + } + + targetBlockHashes, ok := args[2].([2][32]byte) + if !ok { + return nil, errors.New("invalid target block hashes") + } + + if targetBlockHashes[0] == targetBlockHashes[1] { + return nil, errors.New("block hash is the same") + } + + listOfRawPublicKey, ok := args[3].([2][][]byte) + if !ok { + return nil, errors.New("invalid target block number") + } + + rawAggregatedSignatures, ok := args[4].([2][]byte) + if !ok { + return nil, 
errors.New("invalid aggregated signature") + } + + voterPublicKey, err := blst.PublicKeyFromBytes(rawVoterPublicKey) + if err != nil { + return nil, errors.New("malformed voter public key") + } + + var listOfPublicKey [2][]blsCommon.PublicKey + for block := range listOfRawPublicKey { + voterInPublicKeyList := false + for _, rawKey := range listOfRawPublicKey[block] { + publicKey, err := blst.PublicKeyFromBytes(rawKey) + if err != nil { + return nil, errors.New("malformed public key in list of public keys") + } + + if publicKey.Equals(voterPublicKey) { + voterInPublicKeyList = true + } + + listOfPublicKey[block] = append(listOfPublicKey[block], publicKey) + } + + if !voterInPublicKeyList { + return nil, errors.New("reported voter does not in public key list") + } + } + + for _, list := range listOfPublicKey { + if len(list) > maxBlsPublicKeyListLength { + return nil, errors.New("public key list is too long") + } + } + + var aggregatedSignature [2]blsCommon.Signature + for block, rawSignature := range rawAggregatedSignatures { + signature, err := blst.SignatureFromBytes(rawSignature) + if err != nil { + return nil, errors.New("malformed signature") + } + + aggregatedSignature[block] = signature + } + + for block := 0; block < 2; block++ { + voteData := types.VoteData{ + TargetNumber: targetBlockNumber.Uint64(), + TargetHash: targetBlockHashes[block], + } + digest := voteData.Hash() + if !aggregatedSignature[block].FastAggregateVerify(listOfPublicKey[block], digest) { + return nil, errors.New("failed to verify signature") + } + } + + return method.Outputs.Pack(true) +} diff --git a/core/vm/consortium_precompiled_contracts_test.go b/core/vm/consortium_precompiled_contracts_test.go index 05ca2ad4f9..a7106b2829 100644 --- a/core/vm/consortium_precompiled_contracts_test.go +++ b/core/vm/consortium_precompiled_contracts_test.go @@ -14,6 +14,8 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" "github.com/ethereum/go-ethereum/params" ) @@ -1725,3 +1727,243 @@ func addressesToByte(addresses []common.Address) [][]byte { return result } + +func TestValidateFinalityVoteProof(t *testing.T) { + contract := consortiumValidateFinalityProof{} + + contractAbi, err := abi.JSON(strings.NewReader(validateFinalityVoteProofAbi)) + if err != nil { + t.Fatalf("Failed to parse ABI, err %s", err) + } + + var secretKey [3]blsCommon.SecretKey + for i := 0; i < 3; i++ { + secretKey[i], err = blst.RandKey() + if err != nil { + t.Fatalf("Failed to generate key, err %s", err) + } + } + var blockNumber uint64 = 1 + blockHash1 := common.Hash{0x1} + voteData1 := types.VoteData{ + TargetNumber: blockNumber, + TargetHash: blockHash1, + } + digest1 := voteData1.Hash() + blockHash2 := common.Hash{0x2} + voteData2 := types.VoteData{ + TargetNumber: blockNumber, + TargetHash: blockHash2, + } + digest2 := voteData2.Hash() + + targetBlockHashes := [2]common.Hash{blockHash1, blockHash1} + listOfPublicKeys := [2][][]byte{ + { + secretKey[0].PublicKey().Marshal(), + }, + { + secretKey[0].PublicKey().Marshal(), + }, + } + voterPublicKey := secretKey[0].PublicKey().Marshal() + aggregatedSignature := [2][]byte{ + secretKey[0].Sign(digest1[:]).Marshal(), + secretKey[0].Sign(digest1[:]).Marshal(), + } + + input, err := contractAbi.Pack( + validateFinalityVoteProof, + voterPublicKey, + new(big.Int).Add(new(big.Int).SetUint64(1<<64-1), 
common.Big1), + targetBlockHashes, + listOfPublicKeys, + aggregatedSignature, + ) + if err != nil { + t.Fatalf("Failed to pack contract input, err: %s", err) + } + + _, err = contract.Run(input) + if err == nil || err.Error() != "malformed target block number" { + t.Fatalf("Expect to get error %s have %s", "malformed target block number", err) + } + + input, err = contractAbi.Pack( + validateFinalityVoteProof, + voterPublicKey, + big.NewInt(int64(blockNumber)), + targetBlockHashes, + listOfPublicKeys, + aggregatedSignature, + ) + if err != nil { + t.Fatalf("Failed to pack contract input, err: %s", err) + } + + _, err = contract.Run(input) + if err == nil || err.Error() != "block hash is the same" { + t.Fatalf("Expect to get error %s have %s", "block hash is the same", err) + } + + targetBlockHashes = [2]common.Hash{ + blockHash1, + blockHash2, + } + aggregatedSignature = [2][]byte{ + secretKey[0].Sign(digest1[:]).Marshal(), + secretKey[1].Sign(digest2[:]).Marshal(), + } + listOfPublicKeys = [2][][]byte{ + { + secretKey[0].PublicKey().Marshal(), + }, + { + secretKey[1].PublicKey().Marshal(), + }, + } + + input, err = contractAbi.Pack( + validateFinalityVoteProof, + voterPublicKey, + big.NewInt(int64(blockNumber)), + targetBlockHashes, + listOfPublicKeys, + aggregatedSignature, + ) + if err != nil { + t.Fatalf("Failed to pack contract input, err: %s", err) + } + + _, err = contract.Run(input) + if err == nil || err.Error() != "reported voter does not in public key list" { + t.Fatalf("Expect to get error %s have %s", "reported voter does not in public key list", err) + } + + aggregatedSignature = [2][]byte{ + blst.AggregateSignatures([]blsCommon.Signature{ + secretKey[0].Sign(digest1[:]), + secretKey[1].Sign(digest1[:]), + }).Marshal(), + blst.AggregateSignatures([]blsCommon.Signature{ + secretKey[0].Sign(digest2[:]), + secretKey[2].Sign(digest2[:]), + }).Marshal(), + } + + listOfPublicKeys = [2][][]byte{ + { + secretKey[0].PublicKey().Marshal(), + secretKey[1].PublicKey().Marshal(), + }, + { + secretKey[0].PublicKey().Marshal(), + secretKey[2].PublicKey().Marshal(), + }, + } + + input, err = contractAbi.Pack( + validateFinalityVoteProof, + voterPublicKey, + big.NewInt(int64(blockNumber)), + targetBlockHashes, + listOfPublicKeys, + aggregatedSignature, + ) + if err != nil { + t.Fatalf("Failed to pack contract input, err: %s", err) + } + + rawReturn, err := contract.Run(input) + if err != nil { + t.Fatalf("Expect to successfully verify proof, get %s", err) + } + + ret, err := contractAbi.Unpack(validateFinalityVoteProof, rawReturn) + if err != nil { + t.Fatalf("Failed to unpack output, err: %s", err) + } + + returnedBool := (ret[0]).(bool) + if returnedBool != true { + t.Fatalf("Expect the returned value to be true, get %v", returnedBool) + } +} + +func BenchmarkPrecompiledValidateFinalityVoteProof(b *testing.B) { + contractAbi, err := abi.JSON(strings.NewReader(validateFinalityVoteProofAbi)) + if err != nil { + b.Fatalf("Failed to parse ABI, err %s", err) + } + + var secretKeys []blsCommon.SecretKey + for i := 0; i < 200; i++ { + key, err := blst.RandKey() + if err != nil { + b.Fatalf("Failed to generate secret key") + } + + secretKeys = append(secretKeys, key) + } + blockNumber := 10000 + blockHash1 := crypto.Keccak256Hash([]byte{'t', 'e', 's', 't'}) + vote := types.VoteData{ + TargetNumber: uint64(blockNumber), + TargetHash: blockHash1, + } + digest1 := vote.Hash() + + blockHash2 := crypto.Keccak256Hash([]byte{'t', 'e', 's', 't', '2'}) + vote = types.VoteData{ + TargetNumber: 
uint64(blockNumber), + TargetHash: blockHash2, + } + digest2 := vote.Hash() + + var signature [100]blsCommon.Signature + var listOfPublicKey [2][][]byte + for i := 0; i < 100; i++ { + signature[i] = secretKeys[i].Sign(digest1[:]) + listOfPublicKey[0] = append(listOfPublicKey[0], secretKeys[i].PublicKey().Marshal()) + } + aggregatedSignature1 := blst.AggregateSignatures(signature[:]) + + signature[0] = secretKeys[0].Sign(digest2[:]) + listOfPublicKey[1] = append(listOfPublicKey[1], secretKeys[0].PublicKey().Marshal()) + for i := 101; i < 200; i++ { + signature[i-100] = secretKeys[i].Sign(digest2[:]) + listOfPublicKey[1] = append(listOfPublicKey[1], secretKeys[i].PublicKey().Marshal()) + } + aggregatedSignature2 := blst.AggregateSignatures(signature[:]) + + input, err := contractAbi.Pack( + validateFinalityVoteProof, + secretKeys[0].PublicKey().Marshal(), + big.NewInt(int64(blockNumber)), + [2]common.Hash{ + blockHash1, + blockHash2, + }, + listOfPublicKey, + [2][]byte{ + aggregatedSignature1.Marshal(), + aggregatedSignature2.Marshal(), + }, + ) + if err != nil { + b.Fatalf("Failed to pack contract input, err: %s", err) + } + + output, err := contractAbi.Methods[validateFinalityVoteProof].Outputs.Pack(true) + if err != nil { + b.Fatalf("Failed to pack contract output, err: %s", err) + } + + test := precompiledTest{ + Input: common.Bytes2Hex(input), + Expected: common.Bytes2Hex(output), + Name: "200-public-keys", + } + + benchmarkPrecompiled("69", test, b) +} diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index f70f78fd35..c73fc27ccc 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -20,14 +20,15 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/log" "io/ioutil" "os" "strings" "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/common" ) @@ -70,6 +71,7 @@ var allPrecompiles = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{17}): &bls12381MapG1{}, common.BytesToAddress([]byte{18}): &bls12381MapG2{}, common.BytesToAddress([]byte{101}): &consortiumLog{}, + common.BytesToAddress([]byte{105}): &consortiumValidateFinalityProof{}, } // EIP-152 test vectors diff --git a/core/vote/vote_manager.go b/core/vote/vote_manager.go new file mode 100644 index 0000000000..9722bcfaeb --- /dev/null +++ b/core/vote/vote_manager.go @@ -0,0 +1,209 @@ +package vote + +import ( + "encoding/hex" + + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" +) + +var votesManagerCounter = metrics.NewRegisteredCounter("votesManager/local", nil) + +// Backend wraps all methods required for voting. +type Backend interface { + IsMining() bool + EventMux() *event.TypeMux +} + +type Debug struct { + ValidateRule func(header *types.Header) error +} + +// VoteManager will handle the vote produced by self. 
+type VoteManager struct {
+ eth Backend
+ db ethdb.Database
+
+ chain *core.BlockChain
+ chainconfig *params.ChainConfig
+
+ chainHeadCh chan core.ChainHeadEvent
+ chainHeadSub event.Subscription
+
+ pool *VotePool
+ signer *VoteSigner
+
+ engine consensus.FastFinalityPoSA
+
+ // debug is a set of functions used to debug any function called in VoteManager
+ debug *Debug
+}
+
+func NewVoteManager(
+ eth Backend,
+ db ethdb.Database,
+ chainconfig *params.ChainConfig,
+ chain *core.BlockChain,
+ pool *VotePool,
+ enableSign bool,
+ blsPasswordPath, blsWalletPath string,
+ engine consensus.FastFinalityPoSA,
+ debug *Debug,
+) (*VoteManager, error) {
+ voteManager := &VoteManager{
+ eth: eth,
+ db: db,
+
+ chain: chain,
+ chainconfig: chainconfig,
+ chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
+
+ pool: pool,
+ engine: engine,
+ debug: debug,
+ }
+
+ if enableSign {
+ // Create voteSigner.
+ voteSigner, err := NewVoteSigner(blsPasswordPath, blsWalletPath)
+ if err != nil {
+ return nil, err
+ }
+ log.Info("BLS voter public key", "public key", hex.EncodeToString(voteSigner.pubKey[:]))
+ voteManager.signer = voteSigner
+ }
+
+ // Subscribe to chain head event.
+ voteManager.chainHeadSub = voteManager.chain.SubscribeChainHeadEvent(voteManager.chainHeadCh)
+
+ go voteManager.loop()
+
+ return voteManager, nil
+}
+
+func (voteManager *VoteManager) loop() {
+ log.Debug("vote manager routine loop started")
+ events := voteManager.eth.EventMux().Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
+ defer func() {
+ log.Debug("vote manager loop defer func occur")
+ if !events.Closed() {
+ log.Debug("event not closed, unsubscribed by vote manager loop")
+ events.Unsubscribe()
+ }
+ }()
+
+ dlEventCh := events.Chan()
+
+ startVote := true
+ for {
+ select {
+ case ev := <-dlEventCh:
+ if ev == nil {
+ log.Debug("dlEvent is nil, continue")
+ continue
+ }
+ switch ev.Data.(type) {
+ case downloader.StartEvent:
+ log.Debug("downloader is in startEvent mode, will not startVote")
+ startVote = false
+ case downloader.FailedEvent:
+ log.Debug("downloader is in FailedEvent mode, set startVote flag as true")
+ startVote = true
+ case downloader.DoneEvent:
+ log.Debug("downloader is in DoneEvent mode, set the startVote flag to true")
+ startVote = true
+ }
+ case cHead := <-voteManager.chainHeadCh:
+ if !startVote {
+ log.Debug("startVote flag is false, continue")
+ continue
+ }
+ if !voteManager.eth.IsMining() {
+ log.Debug("skip voting because mining is disabled, continue")
+ continue
+ }
+ if voteManager.signer == nil {
+ log.Debug("voting is disabled, skip voting")
+ continue
+ }
+
+ if cHead.Block == nil {
+ log.Debug("cHead.Block is nil, continue")
+ continue
+ }
+
+ curHead := cHead.Block.Header()
+ // Check if cur validator is within the validatorSet at curHead
+ if !voteManager.engine.IsActiveValidatorAt(voteManager.chain, curHead) {
+ log.Debug("cur validator is not within the validatorSet at curHead")
+ continue
+ }
+
+ // Vote for curBlockHeader block.
+ vote := &types.VoteData{
+ TargetNumber: curHead.Number.Uint64(),
+ TargetHash: curHead.Hash(),
+ }
+ voteMessage := &types.VoteEnvelope{
+ RawVoteEnvelope: types.RawVoteEnvelope{
+ Data: vote,
+ },
+ }
+
+ // Put Vote into journal and VotesPool if we are an active validator and allowed to sign it.
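+ // The target block number is also persisted (WriteHighestFinalityVote) so that UnderRules can
+ // reject votes for the same or a lower height later, even across restarts.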
+ if ok := voteManager.UnderRules(curHead); ok {
+ log.Debug("curHead passes the voting rules")
+ if err := voteManager.signer.SignVote(voteMessage); err != nil {
+ log.Error("Failed to sign vote", "err", err, "votedBlockNumber", voteMessage.Data.TargetNumber, "votedBlockHash", voteMessage.Data.TargetHash, "voteMessageHash", voteMessage.Hash())
+ votesSigningErrorCounter.Inc(1)
+ continue
+ }
+ rawdb.WriteHighestFinalityVote(voteManager.db, curHead.Number.Uint64())
+
+ log.Debug("vote manager produced vote", "votedBlockNumber", voteMessage.Data.TargetNumber, "votedBlockHash", voteMessage.Data.TargetHash, "voteMessageHash", voteMessage.Hash())
+ // This is a local vote so just pass the dummy peer information
+ voteManager.pool.PutVote("", voteMessage)
+ votesManagerCounter.Inc(1)
+ }
+ case <-voteManager.chainHeadSub.Err():
+ log.Debug("voteManager subscribed chainHead failed")
+ return
+ }
+ }
+}
+
+// UnderRules checks whether the given header may be voted for according to the following rules:
+// A validator must not publish two distinct votes for the same height. (Rule 1)
+// Validators always vote for their canonical chain's latest block. (Rule 2)
+func (voteManager *VoteManager) UnderRules(header *types.Header) bool {
+ // call debug method
+ if voteManager.debug != nil && voteManager.debug.ValidateRule != nil {
+ if err := voteManager.debug.ValidateRule(header); err != nil {
+ log.Debug("error while calling debug.ValidateRule", "err", err)
+ return false
+ }
+ }
+
+ highestFinalityVote := rawdb.ReadHighestFinalityVote(voteManager.db)
+
+ // Rule: A validator only votes for a block with a higher block number than its previous vote.
+ // This rule implies rule: A validator must not publish two distinct votes for the same height
+ targetNumber := header.Number.Uint64()
+ if highestFinalityVote != nil && targetNumber <= *highestFinalityVote {
+ log.Debug("err: A validator must not publish two distinct votes for the same height.")
+ return false
+ }
+
+ // Rule: Validators always vote for their canonical chain's latest block.
+ // Since the subscribed header is on the canonical chain, this rule is satisfied by default.
+ log.Debug("All rule checks passed")
+ return true
+}
diff --git a/core/vote/vote_pool.go b/core/vote/vote_pool.go
new file mode 100644
index 0000000000..03de97c1bc
--- /dev/null
+++ b/core/vote/vote_pool.go
@@ -0,0 +1,465 @@
+package vote
+
+import (
+ "container/heap"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+const (
+ maxFutureVoteAmountPerBlock = 50
+ maxFutureVotePerPeer = 25
+
+ voteBufferForPut = 256
+ // votes in the range (currentBlockNum-256,currentBlockNum+11] will be stored
+ lowerLimitOfVoteBlockNumber = 256
+ upperLimitOfVoteBlockNumber = 11 // refer to fetcher.maxUncleDist
+
+ chainHeadChanSize = 10 // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
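+ // The two constants below bound how long FetchVoteByBlockHash polls for the pool read lock
+ // before giving up.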
+ + fetchCheckFrequency = 1 * time.Millisecond + fetchRetry = 500 +) + +var ( + localCurVotesPqGauge = metrics.NewRegisteredGauge("curVotesPq/local", nil) + localFutureVotesPqGauge = metrics.NewRegisteredGauge("futureVotesPq/local", nil) +) + +type VoteBox struct { + blockNumber uint64 + voteMessages []*types.VoteEnvelope +} + +// voteWithPeer is a wrapper around VoteEnvelop to include peer information +type voteWithPeer struct { + vote *types.VoteEnvelope + peer string +} + +type VotePool struct { + chain *core.BlockChain + mu sync.RWMutex + + votesFeed event.Feed + scope event.SubscriptionScope + + curVotes map[common.Hash]*VoteBox + futureVotes map[common.Hash]*VoteBox + + curVotesPq *votesPriorityQueue + futureVotesPq *votesPriorityQueue + + chainHeadCh chan core.ChainHeadEvent + chainHeadSub event.Subscription + + votesCh chan *voteWithPeer + + engine consensus.FastFinalityPoSA + maxCurVoteAmountPerBlock int + + numFutureVotePerPeer map[string]uint64 // number of queued votes per peer + originatedFrom map[common.Hash]string // mapping from vote hash to the sender + justifiedBlockNumber uint64 +} + +type votesPriorityQueue []*types.VoteData + +func NewVotePool( + chain *core.BlockChain, + engine consensus.FastFinalityPoSA, + maxCurVoteAmountPerBlock int, +) *VotePool { + votePool := &VotePool{ + chain: chain, + curVotes: make(map[common.Hash]*VoteBox), + futureVotes: make(map[common.Hash]*VoteBox), + curVotesPq: &votesPriorityQueue{}, + futureVotesPq: &votesPriorityQueue{}, + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), + votesCh: make(chan *voteWithPeer, voteBufferForPut), + engine: engine, + maxCurVoteAmountPerBlock: maxCurVoteAmountPerBlock, + numFutureVotePerPeer: make(map[string]uint64), + originatedFrom: make(map[common.Hash]string), + } + + // Subscribe events from blockchain and start the main event loop. + votePool.chainHeadSub = votePool.chain.SubscribeChainHeadEvent(votePool.chainHeadCh) + + go votePool.loop() + return votePool +} + +// loop is the vote pool's main even loop, waiting for and reacting to outside blockchain events and votes channel event. +func (pool *VotePool) loop() { + for { + select { + // Handle ChainHeadEvent. + case ev := <-pool.chainHeadCh: + if ev.Block != nil { + latestBlockNumber := ev.Block.NumberU64() + justifiedBlockNumber, _ := pool.engine.GetJustifiedBlock(pool.chain, ev.Block.NumberU64(), ev.Block.Hash()) + + pool.mu.Lock() + pool.justifiedBlockNumber = justifiedBlockNumber + pool.prune(latestBlockNumber) + pool.transferVotesFromFutureToCur(ev.Block.Header()) + pool.mu.Unlock() + } + case <-pool.chainHeadSub.Err(): + return + + // Handle votes channel and put the vote into vote pool. + case vote := <-pool.votesCh: + pool.putIntoVotePool(vote) + } + } +} + +func (pool *VotePool) PutVote(peer string, vote *types.VoteEnvelope) { + select { + case pool.votesCh <- &voteWithPeer{vote: vote, peer: peer}: + default: + log.Debug("Failed to put vote into vote pool") + } +} + +func (pool *VotePool) putIntoVotePool(voteWithPeerInfo *voteWithPeer) bool { + vote := voteWithPeerInfo.vote + peer := voteWithPeerInfo.peer + + targetNumber := vote.Data.TargetNumber + targetHash := vote.Data.TargetHash + header := pool.chain.CurrentBlock().Header() + headNumber := header.Number.Uint64() + + // Make sure in the range (currentHeight-lowerLimitOfVoteBlockNumber, currentHeight+upperLimitOfVoteBlockNumber]. 
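+ // With the current constants this range is (headNumber-256, headNumber+11].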
+ if targetNumber+lowerLimitOfVoteBlockNumber-1 < headNumber || targetNumber > headNumber+upperLimitOfVoteBlockNumber { + log.Debug("BlockNumber of vote is outside the range of header-256~header+11, will be discarded") + return false + } + + pool.mu.Lock() + defer pool.mu.Unlock() + + if targetNumber <= pool.justifiedBlockNumber { + log.Debug("BlockNumber of vote is older than justified block number") + return false + } + + voteHash := vote.Hash() + if _, ok := pool.originatedFrom[voteHash]; ok { + log.Debug("Vote pool already contained the same vote", "voteHash", voteHash) + return false + } + pool.originatedFrom[voteHash] = peer + + voteData := &types.VoteData{ + TargetNumber: targetNumber, + TargetHash: targetHash, + } + + var votes map[common.Hash]*VoteBox + var votesPq *votesPriorityQueue + isFutureVote := false + + voteBlock := pool.chain.GetHeaderByHash(targetHash) + if voteBlock == nil { + votes = pool.futureVotes + votesPq = pool.futureVotesPq + isFutureVote = true + } else { + votes = pool.curVotes + votesPq = pool.curVotesPq + } + + if isFutureVote { + // As we cannot fully verify the future vote, we need to set a limit of + // future votes per peer to void be DOSed by peer. + if pool.numFutureVotePerPeer[peer] >= maxFutureVotePerPeer { + return false + } + pool.numFutureVotePerPeer[peer]++ + } + + if ok := pool.basicVerify(vote, headNumber, votes, isFutureVote, voteHash); !ok { + if isFutureVote { + pool.numFutureVotePerPeer[peer]-- + } + return false + } + + if !isFutureVote { + // Verify if the vote comes from valid validators based on voteAddress (BLSPublicKey), only verify curVotes here, will verify futureVotes in transfer process. + if pool.engine.VerifyVote(pool.chain, vote) != nil { + return false + } + + // Send vote for handler usage of broadcasting to peers. + voteEv := core.NewVoteEvent{Vote: vote} + pool.votesFeed.Send(voteEv) + } + + pool.putVote(votes, votesPq, vote, voteData, voteHash, isFutureVote) + + return true +} + +func (pool *VotePool) SubscribeNewVoteEvent(ch chan<- core.NewVoteEvent) event.Subscription { + return pool.scope.Track(pool.votesFeed.Subscribe(ch)) +} + +// The vote pool's mutex must already be acquired when calling this function +func (pool *VotePool) putVote(m map[common.Hash]*VoteBox, votesPq *votesPriorityQueue, vote *types.VoteEnvelope, voteData *types.VoteData, voteHash common.Hash, isFutureVote bool) { + targetHash := vote.Data.TargetHash + targetNumber := vote.Data.TargetNumber + + log.Debug("The vote info to put is:", "voteBlockNumber", targetNumber, "voteBlockHash", targetHash) + + if _, ok := m[targetHash]; !ok { + // Push into votes priorityQueue if not exist in corresponding votes Map. + // To be noted: will not put into priorityQueue if exists in map to avoid duplicate element with the same voteData. + heap.Push(votesPq, voteData) + voteBox := &VoteBox{ + blockNumber: targetNumber, + voteMessages: make([]*types.VoteEnvelope, 0, maxFutureVoteAmountPerBlock), + } + m[targetHash] = voteBox + + if isFutureVote { + localFutureVotesPqGauge.Update(int64(votesPq.Len())) + } else { + localCurVotesPqGauge.Update(int64(votesPq.Len())) + } + } + + // Put into corresponding votes map. 
+ m[targetHash].voteMessages = append(m[targetHash].voteMessages, vote)
+ log.Debug("VoteHash put into votepool is:", "voteHash", voteHash)
+}
+
+// The caller must hold the pool mutex
+func (pool *VotePool) transferVotesFromFutureToCur(latestBlockHeader *types.Header) {
+ futurePq := pool.futureVotesPq
+ latestBlockNumber := latestBlockHeader.Number.Uint64()
+
+ // For votes with block number lower than latestBlockNumber-11, transfer to cur if valid.
+ for futurePq.Len() > 0 && futurePq.Peek().TargetNumber+upperLimitOfVoteBlockNumber < latestBlockNumber {
+ blockHash := futurePq.Peek().TargetHash
+ pool.transfer(blockHash)
+ }
+
+ // For votes in the range [latestBlockNumber-11, latestBlockNumber], only transfer the votes inside the local fork.
+ futurePqBuffer := make([]*types.VoteData, 0)
+ for futurePq.Len() > 0 && futurePq.Peek().TargetNumber <= latestBlockNumber {
+ blockHash := futurePq.Peek().TargetHash
+ header := pool.chain.GetHeaderByHash(blockHash)
+ if header == nil {
+ // Buffer the vote so it can be pushed back into futurePq later
+ futurePqBuffer = append(futurePqBuffer, heap.Pop(futurePq).(*types.VoteData))
+ continue
+ }
+ pool.transfer(blockHash)
+ }
+
+ for _, voteData := range futurePqBuffer {
+ heap.Push(futurePq, voteData)
+ }
+}
+
+// The vote pool's mutex must already be acquired when calling this function
+func (pool *VotePool) transfer(blockHash common.Hash) {
+ curPq, futurePq := pool.curVotesPq, pool.futureVotesPq
+ curVotes, futureVotes := pool.curVotes, pool.futureVotes
+ voteData := heap.Pop(futurePq)
+
+ defer localFutureVotesPqGauge.Update(int64(futurePq.Len()))
+
+ voteBox, ok := futureVotes[blockHash]
+ if !ok {
+ return
+ }
+
+ validVotes := make([]*types.VoteEnvelope, 0, len(voteBox.voteMessages))
+ for _, vote := range voteBox.voteMessages {
+ // Verify if the vote comes from valid validators based on voteAddress (BLSPublicKey).
+ if pool.engine.VerifyVote(pool.chain, vote) != nil {
+ continue
+ }
+
+ // In the process of transfer, send valid votes to the votes feed for handler usage
+ voteEv := core.NewVoteEvent{Vote: vote}
+ pool.votesFeed.Send(voteEv)
+ validVotes = append(validVotes, vote)
+ }
+
+ // len(curVotes[blockHash].voteMessages) may exceed maxCurVoteAmountPerBlock here, but it doesn't matter
+ if _, ok := curVotes[blockHash]; !ok {
+ heap.Push(curPq, voteData)
+ curVotes[blockHash] = &VoteBox{voteBox.blockNumber, validVotes}
+ localCurVotesPqGauge.Update(int64(curPq.Len()))
+ } else {
+ curVotes[blockHash].voteMessages = append(curVotes[blockHash].voteMessages, validVotes...)
+ } + + for _, vote := range futureVotes[blockHash].voteMessages { + peer, ok := pool.originatedFrom[vote.Hash()] + if !ok { + log.Debug("Cannot find the sender of vote", "voteHash", vote.Hash()) + continue + } + pool.numFutureVotePerPeer[peer]-- + } + delete(futureVotes, blockHash) +} + +func (pool *VotePool) pruneVote( + latestBlockNumber uint64, + voteMap map[common.Hash]*VoteBox, + voteQueue *votesPriorityQueue, + isFuture bool, +) { + // delete votes older than or equal to latestBlockNumber-lowerLimitOfVoteBlockNumber or justified block number + for voteQueue.Len() > 0 { + vote := voteQueue.Peek() + if vote.TargetNumber+lowerLimitOfVoteBlockNumber-1 < latestBlockNumber || vote.TargetNumber <= pool.justifiedBlockNumber { + blockHash := heap.Pop(voteQueue).(*types.VoteData).TargetHash + + if isFuture { + localFutureVotesPqGauge.Update(int64(voteQueue.Len())) + } else { + localCurVotesPqGauge.Update(int64(voteQueue.Len())) + } + + if voteBox, ok := voteMap[blockHash]; ok { + voteMessages := voteBox.voteMessages + for _, voteMessage := range voteMessages { + voteHash := voteMessage.Hash() + if peer := pool.originatedFrom[voteHash]; peer != "" && isFuture { + pool.numFutureVotePerPeer[peer]-- + } + delete(pool.originatedFrom, voteHash) + } + delete(voteMap, blockHash) + } + } else { + break + } + } +} + +// Prune old data of curVotes and futureVotes +// The caller must hold the pool mutex +func (pool *VotePool) prune(latestBlockNumber uint64) { + pool.pruneVote(latestBlockNumber, pool.curVotes, pool.curVotesPq, false) + pool.pruneVote(latestBlockNumber, pool.futureVotes, pool.futureVotesPq, true) +} + +// GetVotes as batch. +func (pool *VotePool) GetVotes() []*types.VoteEnvelope { + pool.mu.RLock() + defer pool.mu.RUnlock() + + votesRes := make([]*types.VoteEnvelope, 0) + curVotes := pool.curVotes + for _, voteBox := range curVotes { + votesRes = append(votesRes, voteBox.voteMessages...) + } + return votesRes +} + +// FetchVoteByBlockHash reads the finality votes for the provided block hash, the concurrent +// writers may block this function from acquiring the read lock. This function does not sleep +// and wait for acquiring the lock but keep polling the lock fetchRetry times and returns nil +// if it still cannot acquire the lock. This mechanism helps to make this function safer +// because we cannot control the writers and we don't want this function to block the caller. +func (pool *VotePool) FetchVoteByBlockHash(blockHash common.Hash) []*types.VoteEnvelope { + var retry int + for retry = 0; retry < fetchRetry; retry++ { + if !pool.mu.TryRLock() { + time.Sleep(fetchCheckFrequency) + } else { + break + } + } + + // We try to acquire read lock fetchRetry times + // but can not do it, so just return nil here + if retry == fetchRetry { + return nil + } + + // We successfully acquire the read lock, read + // the vote and remember to release the lock + defer pool.mu.RUnlock() + if _, ok := pool.curVotes[blockHash]; ok { + return pool.curVotes[blockHash].voteMessages + } else { + return nil + } +} + +func (pool *VotePool) basicVerify(vote *types.VoteEnvelope, headNumber uint64, m map[common.Hash]*VoteBox, isFutureVote bool, voteHash common.Hash) bool { + targetHash := vote.Data.TargetHash + + // To prevent DOS attacks, make sure no more than 21 votes per blockHash if not futureVotes + // and no more than 50 votes per blockHash if futureVotes. 
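+ // Note: the current-vote limit is configurable via pool.maxCurVoteAmountPerBlock (22 in the tests),
+ // while future votes are capped by maxFutureVoteAmountPerBlock.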
+ maxVoteAmountPerBlock := pool.maxCurVoteAmountPerBlock + if isFutureVote { + maxVoteAmountPerBlock = maxFutureVoteAmountPerBlock + } + if voteBox, ok := m[targetHash]; ok { + if len(voteBox.voteMessages) >= maxVoteAmountPerBlock { + return false + } + } + + // Verify bls signature. + if err := vote.Verify(); err != nil { + log.Error("Failed to verify voteMessage", "err", err) + return false + } + + return true +} + +func (pq votesPriorityQueue) Less(i, j int) bool { + return pq[i].TargetNumber < pq[j].TargetNumber +} + +func (pq votesPriorityQueue) Len() int { + return len(pq) +} + +func (pq votesPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *votesPriorityQueue) Push(vote interface{}) { + curVote := vote.(*types.VoteData) + *pq = append(*pq, curVote) +} + +func (pq *votesPriorityQueue) Pop() interface{} { + tmp := *pq + l := len(tmp) + var res interface{} = tmp[l-1] + *pq = tmp[:l-1] + return res +} + +func (pq *votesPriorityQueue) Peek() *types.VoteData { + if pq.Len() == 0 { + return nil + } + return (*pq)[0] +} diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go new file mode 100644 index 0000000000..2bd1b57fc1 --- /dev/null +++ b/core/vote/vote_pool_test.go @@ -0,0 +1,531 @@ +package vote + +import ( + "container/heap" + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + wallet "github.com/ethereum/go-ethereum/accounts/bls" + "github.com/ethereum/go-ethereum/crypto/bls" + "github.com/google/uuid" + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" +) + +var ( + // testKey is a private key to use for funding a tester account. + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + + // testAddr is the Ethereum address of the tester account. + testAddr = crypto.PubkeyToAddress(testKey.PublicKey) + + password = "secretPassword" + + timeThreshold = 30 +) + +type mockPOSA struct { + consensus.FastFinalityPoSA +} + +// testBackend is a mock implementation of the live Ethereum message handler. 
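+// It always reports mining as enabled so the vote manager under test will produce votes.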
+type testBackend struct { + eventMux *event.TypeMux +} + +func newTestBackend() *testBackend { + return &testBackend{eventMux: new(event.TypeMux)} +} +func (b *testBackend) IsMining() bool { return true } +func (b *testBackend) EventMux() *event.TypeMux { return b.eventMux } + +func (p *mockPOSA) GetJustifiedBlock(chain consensus.ChainHeaderReader, blockNumber uint64, blockHash common.Hash) (uint64, common.Hash) { + return 0, common.Hash{} +} + +func (m *mockPOSA) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { + return nil +} + +func (m *mockPOSA) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header) bool { + return true +} + +func (pool *VotePool) verifyStructureSizeOfVotePool(curVotes, futureVotes, curVotesPq, futureVotesPq int) bool { + for i := 0; i < timeThreshold; i++ { + time.Sleep(1 * time.Second) + if len(pool.curVotes) == curVotes && len(pool.futureVotes) == futureVotes && pool.curVotesPq.Len() == curVotesPq && pool.futureVotesPq.Len() == futureVotesPq { + return true + } + } + return false +} + +func TestValidVotePool(t *testing.T) { + testVotePool(t, true) +} + +func TestInvalidVotePool(t *testing.T) { + testVotePool(t, false) +} + +func testVotePool(t *testing.T, isValidRules bool) { + walletPasswordDir, walletDir := setUpKeyManager(t) + + // Create a database pre-initialize with a genesis block + db := rawdb.NewMemoryDatabase() + genesis := (&core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + BaseFee: big.NewInt(params.InitialBaseFee), + }).MustCommit(db) + chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil) + + mux := new(event.TypeMux) + mockEngine := &mockPOSA{} + + // Create vote pool + votePool := NewVotePool(chain, mockEngine, 22) + + // Create vote manager + // Create a temporary file for the votes journal + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("failed to create temporary file path: %v", err) + } + journal := file.Name() + defer os.Remove(journal) + + // Clean up the temporary file, we only need the path for now + file.Close() + os.Remove(journal) + + var ( + voteManager *VoteManager + ) + if isValidRules { + voteManager, err = NewVoteManager(newTestBackend(), db, params.TestChainConfig, chain, votePool, true, walletPasswordDir, walletDir, mockEngine, nil) + } else { + voteManager, err = NewVoteManager(newTestBackend(), db, params.TestChainConfig, chain, votePool, true, walletPasswordDir, walletDir, mockEngine, &Debug{ValidateRule: func(header *types.Header) error { + return errors.New("mock error") + }}) + } + + if err != nil { + t.Fatalf("failed to create vote managers") + } + + // Send the done event of downloader + time.Sleep(10 * time.Millisecond) + mux.Post(downloader.DoneEvent{}) + + bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + for i := 0; i < 10; i++ { + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + } + + if !isValidRules { + if votePool.verifyStructureSizeOfVotePool(11, 0, 11, 0) { + t.Fatalf("put vote failed") + } + return + } + + if !votePool.verifyStructureSizeOfVotePool(11, 0, 11, 0) { + t.Fatalf("put vote failed") + } + + // Verify if votesPq is min heap + votesPq := votePool.curVotesPq + pqBuffer := 
make([]*types.VoteData, 0) + lastVotedBlockNumber := uint64(0) + for votesPq.Len() > 0 { + voteData := heap.Pop(votesPq).(*types.VoteData) + if voteData.TargetNumber < lastVotedBlockNumber { + t.Fatalf("votesPq verification failed") + } + lastVotedBlockNumber = voteData.TargetNumber + pqBuffer = append(pqBuffer, voteData) + } + for _, voteData := range pqBuffer { + heap.Push(votesPq, voteData) + } + + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + + if !votePool.verifyStructureSizeOfVotePool(12, 0, 12, 0) { + t.Fatalf("put vote failed") + } + + for i := 0; i < 256; i++ { + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + } + + // currently chain size is 268, and votePool should be pruned, so vote pool size should be 256! + if !votePool.verifyStructureSizeOfVotePool(256, 0, 256, 0) { + t.Fatalf("put vote failed") + } + + // Test invalid vote whose number larger than latestHeader + 13 + invalidVote := &types.VoteEnvelope{ + RawVoteEnvelope: types.RawVoteEnvelope{ + Data: &types.VoteData{ + TargetNumber: 1000, + }, + }, + } + voteManager.pool.PutVote("", invalidVote) + + if !votePool.verifyStructureSizeOfVotePool(256, 0, 256, 0) { + t.Fatalf("put vote failed") + } + + votes := votePool.GetVotes() + if len(votes) != 256 { + t.Fatalf("get votes failed") + } + + // Test future votes scenario: votes number within latestBlockHeader ~ latestBlockHeader + 13 + futureVote := &types.VoteEnvelope{ + RawVoteEnvelope: types.RawVoteEnvelope{ + Data: &types.VoteData{ + TargetNumber: 279, + }, + }, + } + if err := voteManager.signer.SignVote(futureVote); err != nil { + t.Fatalf("sign vote failed") + } + voteManager.pool.PutVote("", futureVote) + + if !votePool.verifyStructureSizeOfVotePool(256, 1, 256, 1) { + t.Fatalf("put vote failed") + } + + // Test duplicate vote case, shouldn'd be put into vote pool + duplicateVote := &types.VoteEnvelope{ + RawVoteEnvelope: types.RawVoteEnvelope{ + Data: &types.VoteData{ + TargetNumber: 279, + }, + }, + } + if err := voteManager.signer.SignVote(duplicateVote); err != nil { + t.Fatalf("sign vote failed") + } + voteManager.pool.PutVote("", duplicateVote) + + if !votePool.verifyStructureSizeOfVotePool(256, 1, 256, 1) { + t.Fatalf("put vote failed") + } + + // Test future votes larger than latestBlockNumber + 13 should be rejected + futureVote = &types.VoteEnvelope{ + RawVoteEnvelope: types.RawVoteEnvelope{ + Data: &types.VoteData{ + TargetNumber: 282, + TargetHash: common.Hash{}, + }, + }, + } + voteManager.pool.PutVote("", futureVote) + if !votePool.verifyStructureSizeOfVotePool(256, 1, 256, 1) { + t.Fatalf("put vote failed") + } + + // Test transfer votes from future to cur, latest block header is #288 after the following generation + // For the above BlockNumber 279, it did not have blockHash, should be assigned as well below. 
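+ // The future vote above was stored under the empty hash, so once block 279 is generated it is
+ // re-keyed to the real block hash (in both the map and the priority queue) before being transferred.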
+ curNumber := 268 + var futureBlockHash common.Hash + for i := 0; i < 20; i++ { + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + curNumber += 1 + if curNumber == 279 { + futureBlockHash = bs[0].Hash() + futureVotesMap := votePool.futureVotes + voteBox := futureVotesMap[common.Hash{}] + futureVotesMap[futureBlockHash] = voteBox + delete(futureVotesMap, common.Hash{}) + futureVotesPq := votePool.futureVotesPq + futureVotesPq.Peek().TargetHash = futureBlockHash + } + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + } + + for i := 0; i < timeThreshold; i++ { + time.Sleep(1 * time.Second) + _, ok := votePool.curVotes[futureBlockHash] + if ok && len(votePool.curVotes[futureBlockHash].voteMessages) == 2 { + break + } + } + if votePool.curVotes[futureBlockHash] == nil || len(votePool.curVotes[futureBlockHash].voteMessages) != 2 { + t.Fatalf("transfer vote failed") + } + + // Pruner will keep the size of votePool as latestBlockHeader-255~latestBlockHeader, then final result should be 256! + if !votePool.verifyStructureSizeOfVotePool(256, 0, 256, 0) { + t.Fatalf("put vote failed") + } + + for i := 0; i < 224; i++ { + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + } + + bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } +} + +func setUpKeyManager(t *testing.T) (string, string) { + walletDir := filepath.Join(t.TempDir(), "wallet") + walletPasswordDir := filepath.Join(t.TempDir(), "password") + if err := os.MkdirAll(filepath.Dir(walletPasswordDir), 0700); err != nil { + t.Fatalf("failed to create walletPassword dir: %v", err) + } + if err := ioutil.WriteFile(walletPasswordDir, []byte(password), 0600); err != nil { + t.Fatalf("failed to write wallet password dir: %v", err) + } + if err := os.MkdirAll(walletDir, 0700); err != nil { + t.Fatalf("failed to create wallet dir: %v", err) + } + w, err := wallet.New(walletDir, walletPasswordDir) + if err != nil { + t.Fatalf("failed to create wallet: %v", err) + } + km, _ := wallet.NewKeyManager(context.Background(), w) + secretKey, _ := bls.RandKey() + encryptor := keystorev4.New() + pubKeyBytes := secretKey.PublicKey().Marshal() + cryptoFields, err := encryptor.Encrypt(secretKey.Marshal(), password) + if err != nil { + t.Fatalf("failed: %v", err) + } + + id, _ := uuid.NewRandom() + keystore := &wallet.Keystore{ + Crypto: cryptoFields, + ID: id.String(), + Pubkey: fmt.Sprintf("%x", pubKeyBytes), + Version: encryptor.Version(), + Name: encryptor.Name(), + } + + encodedFile, _ := json.MarshalIndent(keystore, "", "\t") + keyStoreDir := filepath.Join(t.TempDir(), "keystore") + keystoreFile, _ := os.Create(fmt.Sprintf("%s/keystore-%s.json", keyStoreDir, "publichh")) + keystoreFile.Write(encodedFile) + km.ImportKeystores(context.Background(), []*wallet.Keystore{keystore}, []string{password}) + return walletPasswordDir, walletDir +} + +func generateVote( + blockNumber int, + blockHash common.Hash, + secretKey blsCommon.SecretKey, +) *types.VoteEnvelope { + voteData := types.VoteData{ + TargetNumber: uint64(blockNumber), + TargetHash: blockHash, + } + digest := voteData.Hash() + signature := secretKey.Sign(digest[:]) + + vote := &types.VoteEnvelope{ + RawVoteEnvelope: types.RawVoteEnvelope{ + PublicKey: 
types.BLSPublicKey(secretKey.PublicKey().Marshal()), + Signature: types.BLSSignature(signature.Marshal()), + Data: &voteData, + }, + } + + return vote +} + +func TestVotePoolDosProtection(t *testing.T) { + secretKey, err := bls.RandKey() + if err != nil { + t.Fatalf("Failed to create secret key, err %s", err) + } + + // Create a database pre-initialize with a genesis block + db := rawdb.NewMemoryDatabase() + genesis := (&core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + BaseFee: big.NewInt(params.InitialBaseFee), + }).MustCommit(db) + chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil) + + bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 25, nil, true) + if _, err := chain.InsertChain(bs[:1]); err != nil { + panic(err) + } + mockEngine := &mockPOSA{} + + // Create vote pool + votePool := NewVotePool(chain, mockEngine, 22) + + for i := 0; i < maxFutureVotePerPeer; i++ { + vote := generateVote(1, common.BigToHash(big.NewInt(int64(i+1))), secretKey) + votePool.PutVote("AAAA", vote) + time.Sleep(100 * time.Millisecond) + } + + if len(*votePool.futureVotesPq) != maxFutureVotePerPeer { + t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq)) + } + if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer { + t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"]) + } + + // This vote is dropped due to DOS protection + vote := generateVote(1, common.BigToHash(big.NewInt(int64(maxFutureVoteAmountPerBlock+1))), secretKey) + votePool.PutVote("AAAA", vote) + time.Sleep(100 * time.Millisecond) + if len(*votePool.futureVotesPq) != maxFutureVotePerPeer { + t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq)) + } + if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer { + t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"]) + } + + // Vote from different peer must be accepted + vote = generateVote(1, common.BigToHash(big.NewInt(int64(maxFutureVoteAmountPerBlock+2))), secretKey) + votePool.PutVote("BBBB", vote) + time.Sleep(100 * time.Millisecond) + if len(*votePool.futureVotesPq) != maxFutureVotePerPeer+1 { + t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq)) + } + if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer { + t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"]) + } + if votePool.numFutureVotePerPeer["BBBB"] != 1 { + t.Fatalf("Number of future vote per peer, expect %d have %d", 1, votePool.numFutureVotePerPeer["BBBB"]) + } + + // One vote is not queued twice + votePool.PutVote("CCCC", vote) + time.Sleep(100 * time.Millisecond) + if len(*votePool.futureVotesPq) != maxFutureVotePerPeer+1 { + t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq)) + } + if votePool.numFutureVotePerPeer["CCCC"] != 0 { + t.Fatalf("Number of future vote per peer, expect %d have %d", 0, votePool.numFutureVotePerPeer["CCCC"]) + } + + if _, err := chain.InsertChain(bs[1:]); err != nil { + panic(err) + } + time.Sleep(100 * time.Millisecond) + // Future vote must be transferred to current and failed the verification, + 
// numFutureVotePerPeer decreases + if len(*votePool.futureVotesPq) != 0 { + t.Fatalf("Future vote pool length, expect %d have %d", 0, len(*votePool.futureVotesPq)) + } + if votePool.numFutureVotePerPeer["AAAA"] != 0 { + t.Fatalf("Number of future vote per peer, expect %d have %d", 0, votePool.numFutureVotePerPeer["AAAA"]) + } +} + +type mockPOSAv2 struct { + consensus.FastFinalityPoSA +} + +func (p *mockPOSAv2) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, header *types.Header) (uint64, common.Hash, error) { + parentHeader := chain.GetHeaderByHash(header.ParentHash) + if parentHeader == nil { + return 0, common.Hash{}, fmt.Errorf("unexpected error") + } + return parentHeader.Number.Uint64(), parentHeader.Hash(), nil +} + +func (m *mockPOSAv2) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { + header := chain.GetHeaderByHash(vote.Data.TargetHash) + if header == nil { + return errors.New("header not found") + } + + if header.Number.Uint64() != vote.Data.TargetNumber { + return errors.New("wrong target number in vote") + } + + return nil +} + +func (m *mockPOSAv2) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header *types.Header) bool { + return true +} + +func TestVotePoolWrongTargetNumber(t *testing.T) { + secretKey, err := bls.RandKey() + if err != nil { + t.Fatalf("Failed to create secret key, err %s", err) + } + + // Create a database pre-initialize with a genesis block + db := rawdb.NewMemoryDatabase() + genesis := (&core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + BaseFee: big.NewInt(params.InitialBaseFee), + }).MustCommit(db) + chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil) + + bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true) + if _, err := chain.InsertChain(bs[:1]); err != nil { + panic(err) + } + mockEngine := &mockPOSAv2{} + + // Create vote pool + votePool := NewVotePool(chain, mockEngine, 22) + + // bs[0] is the block 1 so the target block number must be 1. 
+ // Here we provide wrong target number 0 + vote := generateVote(0, bs[0].Hash(), secretKey) + votePool.PutVote("AAAA", vote) + time.Sleep(100 * time.Millisecond) + + if len(votePool.curVotes) != 0 { + t.Fatalf("Current vote length, expect %d have %d", 0, len(votePool.curVotes)) + } +} diff --git a/core/vote/vote_signer.go b/core/vote/vote_signer.go new file mode 100644 index 0000000000..c687b37fd9 --- /dev/null +++ b/core/vote/vote_signer.go @@ -0,0 +1,87 @@ +package vote + +import ( + "context" + "time" + + wallet "github.com/ethereum/go-ethereum/accounts/bls" + "github.com/ethereum/go-ethereum/params" + + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/bls" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +const ( + voteSignerTimeout = time.Second * 5 +) + +var votesSigningErrorCounter = metrics.NewRegisteredCounter("votesSigner/error", nil) + +type VoteSigner struct { + km *wallet.KeyManager + pubKey [params.BLSPubkeyLength]byte +} + +func NewVoteSigner(blsPasswordPath, blsWalletPath string) (*VoteSigner, error) { + w, err := wallet.New(blsWalletPath, blsPasswordPath) + if err != nil { + log.Error("Failed to open BLS wallet", "err", err) + return nil, err + } + + log.Info("Read BLS wallet password successfully") + + km, err := wallet.NewKeyManager(context.Background(), w) + if err != nil { + log.Error("Initialize key manager failed", "err", err) + return nil, err + } + log.Info("Initialized keymanager successfully") + + ctx, cancel := context.WithTimeout(context.Background(), voteSignerTimeout) + defer cancel() + + pubKeys, err := km.FetchValidatingPublicKeys(ctx) + if err != nil { + return nil, errors.Wrap(err, "could not fetch validating public keys") + } + + if len(pubKeys) < 1 { + return nil, errors.New("no BLS key in keystore") + } + + return &VoteSigner{ + km: km, + pubKey: pubKeys[0], + }, nil +} + +func (signer *VoteSigner) SignVote(vote *types.VoteEnvelope) error { + // Sign the vote, fetch the first pubKey as validator's bls public key. + pubKey := signer.pubKey + blsPubKey, err := bls.PublicKeyFromBytes(pubKey[:]) + if err != nil { + return errors.Wrap(err, "convert public key from bytes to bls failed") + } + + voteDataHash := vote.Data.Hash() + + ctx, cancel := context.WithTimeout(context.Background(), voteSignerTimeout) + defer cancel() + + signature, err := (*signer.km).Sign(ctx, &wallet.SignRequest{ + PublicKey: pubKey[:], + SigningRoot: voteDataHash[:], + }) + if err != nil { + return err + } + + copy(vote.PublicKey[:], blsPubKey.Marshal()[:]) + copy(vote.Signature[:], signature.Marshal()[:]) + return nil +} diff --git a/crypto/bls/bls.go b/crypto/bls/bls.go new file mode 100644 index 0000000000..eb266d2b7e --- /dev/null +++ b/crypto/bls/bls.go @@ -0,0 +1,75 @@ +// Package bls implements a go-wrapper around a library implementing the +// BLS12-381 curve and signature scheme. This package exposes a public API for +// verifying and aggregating BLS signatures used by Ethereum. +package bls + +import ( + "github.com/ethereum/go-ethereum/crypto/bls/blst" + "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/crypto/bls/herumi" +) + +// Initialize herumi temporarily while we transition to blst for ethdo. +func init() { + herumi.HerumiInit() +} + +// SecretKeyFromBytes creates a BLS private key from a BigEndian byte slice. 
+func SecretKeyFromBytes(privKey []byte) (SecretKey, error) { + return blst.SecretKeyFromBytes(privKey) +} + +// PublicKeyFromBytes creates a BLS public key from a BigEndian byte slice. +func PublicKeyFromBytes(pubKey []byte) (PublicKey, error) { + return blst.PublicKeyFromBytes(pubKey) +} + +// SignatureFromBytes creates a BLS signature from a LittleEndian byte slice. +func SignatureFromBytes(sig []byte) (Signature, error) { + return blst.SignatureFromBytes(sig) +} + +// MultipleSignaturesFromBytes creates a slice of BLS signatures from a LittleEndian 2d-byte slice. +func MultipleSignaturesFromBytes(sigs [][]byte) ([]Signature, error) { + return blst.MultipleSignaturesFromBytes(sigs) +} + +// AggregatePublicKeys aggregates the provided raw public keys into a single key. +func AggregatePublicKeys(pubs [][]byte) (PublicKey, error) { + return blst.AggregatePublicKeys(pubs) +} + +// AggregateMultiplePubkeys aggregates the provided decompressed keys into a single key. +func AggregateMultiplePubkeys(pubs []PublicKey) PublicKey { + return blst.AggregateMultiplePubkeys(pubs) +} + +// AggregateSignatures converts a list of signatures into a single, aggregated sig. +func AggregateSignatures(sigs []common.Signature) common.Signature { + return blst.AggregateSignatures(sigs) +} + +// AggregateCompressedSignatures converts a list of compressed signatures into a single, aggregated sig. +func AggregateCompressedSignatures(multiSigs [][]byte) (common.Signature, error) { + return blst.AggregateCompressedSignatures(multiSigs) +} + +// VerifySignature verifies a single signature. For performance reason, always use VerifyMultipleSignatures if possible. +func VerifySignature(sig []byte, msg [32]byte, pubKey common.PublicKey) (bool, error) { + return blst.VerifySignature(sig, msg, pubKey) +} + +// VerifyMultipleSignatures verifies multiple signatures for distinct messages securely. +func VerifyMultipleSignatures(sigs [][]byte, msgs [][32]byte, pubKeys []common.PublicKey) (bool, error) { + return blst.VerifyMultipleSignatures(sigs, msgs, pubKeys) +} + +// NewAggregateSignature creates a blank aggregate signature. +func NewAggregateSignature() common.Signature { + return blst.NewAggregateSignature() +} + +// RandKey creates a new private key using a random input. +func RandKey() (common.SecretKey, error) { + return blst.RandKey() +} diff --git a/crypto/bls/bls_test.go b/crypto/bls/bls_test.go new file mode 100644 index 0000000000..5c07b4e65f --- /dev/null +++ b/crypto/bls/bls_test.go @@ -0,0 +1,30 @@ +package bls + +import ( + "github.com/stretchr/testify/require" + "testing" + + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func TestDisallowZeroSecretKeys(t *testing.T) { + t.Run("blst", func(t *testing.T) { + // Blst does a zero check on the key during deserialization. 
+ _, err := SecretKeyFromBytes(common.ZeroSecretKey[:]) + require.Equal(t, common.ErrSecretUnmarshal, err) + }) +} + +func TestDisallowZeroPublicKeys(t *testing.T) { + t.Run("blst", func(t *testing.T) { + _, err := PublicKeyFromBytes(common.InfinitePublicKey[:]) + require.Equal(t, common.ErrInfinitePubKey, err) + }) +} + +func TestDisallowZeroPublicKeys_AggregatePubkeys(t *testing.T) { + t.Run("blst", func(t *testing.T) { + _, err := AggregatePublicKeys([][]byte{common.InfinitePublicKey[:], common.InfinitePublicKey[:]}) + require.Equal(t, common.ErrInfinitePubKey, err) + }) +} diff --git a/crypto/bls/blst/aliases.go b/crypto/bls/blst/aliases.go new file mode 100644 index 0000000000..8f3028a7eb --- /dev/null +++ b/crypto/bls/blst/aliases.go @@ -0,0 +1,11 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import blst "github.com/supranational/blst/bindings/go" + +// Internal types for blst. +type blstPublicKey = blst.P1Affine +type blstSignature = blst.P2Affine +type blstAggregateSignature = blst.P2Aggregate +type blstAggregatePublicKey = blst.P1Aggregate diff --git a/crypto/bls/blst/bls_benchmark_test.go b/crypto/bls/blst/bls_benchmark_test.go new file mode 100644 index 0000000000..a11332882d --- /dev/null +++ b/crypto/bls/blst/bls_benchmark_test.go @@ -0,0 +1,64 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst_test + +import ( + "github.com/stretchr/testify/require" + "testing" + + "github.com/ethereum/go-ethereum/crypto/bls/blst" + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func BenchmarkSignature_Verify(b *testing.B) { + sk, err := blst.RandKey() + require.NoError(b, err) + + msg := []byte("Some msg") + sig := sk.Sign(msg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !sig.Verify(sk.PublicKey(), msg) { + b.Fatal("could not verify sig") + } + } +} + +func BenchmarkSignature_AggregateVerify(b *testing.B) { + sigN := 128 // MAX_ATTESTATIONS per block. + + var pks []common.PublicKey + var sigs []common.Signature + var msgs [][32]byte + for i := 0; i < sigN; i++ { + msg := [32]byte{'s', 'i', 'g', 'n', 'e', 'd', byte(i)} + sk, err := blst.RandKey() + require.NoError(b, err) + sig := sk.Sign(msg[:]) + pks = append(pks, sk.PublicKey()) + sigs = append(sigs, sig) + msgs = append(msgs, msg) + } + aggregated := blst.AggregateSignatures(sigs) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if !aggregated.AggregateVerify(pks, msgs) { + b.Fatal("could not verify aggregate sig") + } + } +} + +func BenchmarkSecretKey_Marshal(b *testing.B) { + key, err := blst.RandKey() + require.NoError(b, err) + d := key.Marshal() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := blst.SecretKeyFromBytes(d) + _ = err + } +} diff --git a/crypto/bls/blst/doc.go b/crypto/bls/blst/doc.go new file mode 100644 index 0000000000..b5f8850833 --- /dev/null +++ b/crypto/bls/blst/doc.go @@ -0,0 +1,6 @@ +// Package blst implements a go-wrapper around a library implementing the +// BLS12-381 curve and signature scheme. This package exposes a public API for +// verifying and aggregating BLS signatures used by Ethereum. +// +// This implementation uses the library written by Supranational, blst. 
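+//
+// A minimal usage sketch from a client package, using only the functions exported
+// below (error handling elided):
+//
+//  sk, _ := blst.RandKey()
+//  msg := []byte("hello")
+//  sig := sk.Sign(msg)
+//  ok := sig.Verify(sk.PublicKey(), msg)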
+package blst diff --git a/crypto/bls/blst/init.go b/crypto/bls/blst/init.go new file mode 100644 index 0000000000..ab8be94ca5 --- /dev/null +++ b/crypto/bls/blst/init.go @@ -0,0 +1,18 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import ( + "runtime" + + blst "github.com/supranational/blst/bindings/go" +) + +func init() { + // Reserve 1 core for general application work + maxProcs := runtime.GOMAXPROCS(0) - 1 + if maxProcs <= 0 { + maxProcs = 1 + } + blst.SetMaxProcs(maxProcs) +} diff --git a/crypto/bls/blst/public_key.go b/crypto/bls/blst/public_key.go new file mode 100644 index 0000000000..f543241057 --- /dev/null +++ b/crypto/bls/blst/public_key.go @@ -0,0 +1,114 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import ( + "fmt" + "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/params" + lru "github.com/hashicorp/golang-lru" + "github.com/pkg/errors" +) + +var maxKeys = 1000000 +var pubkeyCache, _ = lru.New(maxKeys) + +// PublicKey used in the BLS signature scheme. +type PublicKey struct { + p *blstPublicKey +} + +// PublicKeyFromBytes creates a BLS public key from a BigEndian byte slice. +func PublicKeyFromBytes(pubKey []byte) (common.PublicKey, error) { + if len(pubKey) != params.BLSPubkeyLength { + return nil, fmt.Errorf("public key must be %d bytes", params.BLSPubkeyLength) + } + newKey := (*[params.BLSPubkeyLength]byte)(pubKey) + if cv, ok := pubkeyCache.Get(*newKey); ok { + return cv.(*PublicKey).Copy(), nil + } + // Subgroup check NOT done when decompressing pubkey. + p := new(blstPublicKey).Uncompress(pubKey) + if p == nil { + return nil, errors.New("could not unmarshal bytes into public key") + } + // Subgroup and infinity check + if !p.KeyValidate() { + // NOTE: the error is not quite accurate since it includes group check + return nil, common.ErrInfinitePubKey + } + pubKeyObj := &PublicKey{p: p} + copiedKey := pubKeyObj.Copy() + cacheKey := *newKey + pubkeyCache.Add(cacheKey, copiedKey) + return pubKeyObj, nil +} + +// AggregatePublicKeys aggregates the provided raw public keys into a single key. +func AggregatePublicKeys(pubs [][]byte) (common.PublicKey, error) { + if len(pubs) == 0 { + return nil, errors.New("nil or empty public keys") + } + agg := new(blstAggregatePublicKey) + mulP1 := make([]*blstPublicKey, 0, len(pubs)) + for _, pubkey := range pubs { + pubKeyObj, err := PublicKeyFromBytes(pubkey) + if err != nil { + return nil, err + } + mulP1 = append(mulP1, pubKeyObj.(*PublicKey).p) + } + // No group check needed here since it is done in PublicKeyFromBytes + // Note the checks could be moved from PublicKeyFromBytes into Aggregate + // and take advantage of multi-threading. + agg.Aggregate(mulP1, false) + return &PublicKey{p: agg.ToAffine()}, nil +} + +// Marshal a public key into a LittleEndian byte slice. +func (p *PublicKey) Marshal() []byte { + return p.p.Compress() +} + +// Copy the public key to a new pointer reference. +func (p *PublicKey) Copy() common.PublicKey { + np := *p.p + return &PublicKey{p: &np} +} + +// IsInfinite checks if the public key is infinite. +func (p *PublicKey) IsInfinite() bool { + zeroKey := new(blstPublicKey) + return p.p.Equals(zeroKey) +} + +// Equals checks if the provided public key is equal to +// the current one. 
+func (p *PublicKey) Equals(p2 common.PublicKey) bool { + return p.p.Equals(p2.(*PublicKey).p) +} + +// Aggregate two public keys. +func (p *PublicKey) Aggregate(p2 common.PublicKey) common.PublicKey { + agg := new(blstAggregatePublicKey) + // No group check here since it is checked at decompression time + agg.Add(p.p, false) + agg.Add(p2.(*PublicKey).p, false) + p.p = agg.ToAffine() + + return p +} + +// AggregateMultiplePubkeys aggregates the provided decompressed keys into a single key. +func AggregateMultiplePubkeys(pubkeys []common.PublicKey) common.PublicKey { + mulP1 := make([]*blstPublicKey, 0, len(pubkeys)) + for _, pubkey := range pubkeys { + mulP1 = append(mulP1, pubkey.(*PublicKey).p) + } + agg := new(blstAggregatePublicKey) + // No group check needed here since it is done in PublicKeyFromBytes + // Note the checks could be moved from PublicKeyFromBytes into Aggregate + // and take advantage of multi-threading. + agg.Aggregate(mulP1, false) + return &PublicKey{p: agg.ToAffine()} +} diff --git a/crypto/bls/blst/public_key_test.go b/crypto/bls/blst/public_key_test.go new file mode 100644 index 0000000000..9b9ed5408e --- /dev/null +++ b/crypto/bls/blst/public_key_test.go @@ -0,0 +1,97 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst_test + +import ( + "bytes" + "errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + + "github.com/ethereum/go-ethereum/crypto/bls/blst" + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func TestPublicKeyFromBytes(t *testing.T) { + tests := []struct { + name string + input []byte + err error + }{ + { + name: "Nil", + err: errors.New("public key must be 48 bytes"), + }, + { + name: "Empty", + input: []byte{}, + err: errors.New("public key must be 48 bytes"), + }, + { + name: "Short", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("public key must be 48 bytes"), + }, + { + name: "Long", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("public key must be 48 bytes"), + }, + { + name: "Bad", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("could not unmarshal bytes into public key"), + }, + { + name: "Good", + input: []byte{0xa9, 0x9a, 0x76, 0xed, 0x77, 0x96, 0xf7, 0xbe, 0x22, 0xd5, 0xb7, 0xe8, 0x5d, 0xee, 0xb7, 0xc5, 0x67, 0x7e, 0x88, 0xe5, 0x11, 0xe0, 0xb3, 0x37, 0x61, 0x8f, 0x8c, 0x4e, 0xb6, 0x13, 0x49, 0xb4, 0xbf, 0x2d, 0x15, 0x3f, 0x64, 0x9f, 0x7b, 0x53, 0x35, 0x9f, 0xe8, 0xb9, 0x4a, 0x38, 0xe4, 0x4c}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := blst.PublicKeyFromBytes(test.input) + if test.err != nil { + assert.NotEqual(t, nil, err, 
"No error returned") + assert.ErrorContains(t, test.err, err.Error(), "Unexpected error returned") + } else { + assert.NoError(t, err) + assert.Equal(t, 0, bytes.Compare(res.Marshal(), test.input)) + } + }) + } +} + +func TestPublicKey_Copy(t *testing.T) { + priv, err := blst.RandKey() + require.NoError(t, err) + pubkeyA := priv.PublicKey() + pubkeyBytes := pubkeyA.Marshal() + + pubkeyB := pubkeyA.Copy() + priv2, err := blst.RandKey() + require.NoError(t, err) + pubkeyB.Aggregate(priv2.PublicKey()) + require.Equal(t, pubkeyA.Marshal(), pubkeyBytes, "Pubkey was mutated after copy") +} + +func TestPublicKey_Aggregate(t *testing.T) { + priv, err := blst.RandKey() + require.NoError(t, err) + pubkeyA := priv.PublicKey() + + pubkeyB := pubkeyA.Copy() + priv2, err := blst.RandKey() + require.NoError(t, err) + resKey := pubkeyB.Aggregate(priv2.PublicKey()) + + aggKey := blst.AggregateMultiplePubkeys([]common.PublicKey{priv.PublicKey(), priv2.PublicKey()}) + require.Equal(t, resKey.Marshal(), aggKey.Marshal(), "Pubkey does not match up") +} + +func TestPublicKeysEmpty(t *testing.T) { + var pubs [][]byte + _, err := blst.AggregatePublicKeys(pubs) + require.ErrorContains(t, err, "nil or empty public keys") +} diff --git a/crypto/bls/blst/secret_key.go b/crypto/bls/blst/secret_key.go new file mode 100644 index 0000000000..498922d3a3 --- /dev/null +++ b/crypto/bls/blst/secret_key.go @@ -0,0 +1,83 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import ( + "crypto/subtle" + "fmt" + "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/crypto/rand" + "github.com/ethereum/go-ethereum/params" + blst "github.com/supranational/blst/bindings/go" +) + +// bls12SecretKey used in the BLS signature scheme. +type bls12SecretKey struct { + p *blst.SecretKey +} + +// RandKey creates a new private key using a random method provided as an io.Reader. +func RandKey() (common.SecretKey, error) { + // Generate 32 bytes of randomness + var ikm [32]byte + _, err := rand.NewGenerator().Read(ikm[:]) + if err != nil { + return nil, err + } + // Defensive check, that we have not generated a secret key, + secKey := &bls12SecretKey{blst.KeyGen(ikm[:])} + if IsZero(secKey.Marshal()) { + return nil, common.ErrZeroKey + } + return secKey, nil +} + +// SecretKeyFromBytes creates a BLS private key from a BigEndian byte slice. +func SecretKeyFromBytes(privKey []byte) (common.SecretKey, error) { + if len(privKey) != params.BLSSecretKeyLength { + return nil, fmt.Errorf("secret key must be %d bytes", params.BLSSecretKeyLength) + } + secKey := new(blst.SecretKey).Deserialize(privKey) + if secKey == nil { + return nil, common.ErrSecretUnmarshal + } + wrappedKey := &bls12SecretKey{p: secKey} + if IsZero(privKey) { + return nil, common.ErrZeroKey + } + return wrappedKey, nil +} + +// PublicKey obtains the public key corresponding to the BLS secret key. +func (s *bls12SecretKey) PublicKey() common.PublicKey { + return &PublicKey{p: new(blstPublicKey).From(s.p)} +} + +// IsZero checks if the secret key is a zero key. +func IsZero(sKey []byte) bool { + b := byte(0) + for _, s := range sKey { + b |= s + } + return subtle.ConstantTimeByteEq(b, 0) == 1 +} + +// Sign a message using a secret key - in a beacon/validator client. +// +// In IETF draft BLS specification: +// Sign(SK, message) -> signature: a signing algorithm that generates +// +// a deterministic signature given a secret key SK and a message. 
+// +// In Ethereum proof of stake specification: +// def Sign(SK: int, message: Bytes) -> BLSSignature +func (s *bls12SecretKey) Sign(msg []byte) common.Signature { + signature := new(blstSignature).Sign(s.p, msg, dst) + return &Signature{s: signature} +} + +// Marshal a secret key into a LittleEndian byte slice. +func (s *bls12SecretKey) Marshal() []byte { + keyBytes := s.p.Serialize() + return keyBytes +} diff --git a/crypto/bls/blst/secret_key_test.go b/crypto/bls/blst/secret_key_test.go new file mode 100644 index 0000000000..0d440ace4a --- /dev/null +++ b/crypto/bls/blst/secret_key_test.go @@ -0,0 +1,98 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst_test + +import ( + "bytes" + "crypto/rand" + "errors" + byteutil "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + + "github.com/ethereum/go-ethereum/crypto/bls/blst" + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func TestMarshalUnmarshal(t *testing.T) { + priv, err := blst.RandKey() + require.NoError(t, err) + b := priv.Marshal() + b32 := byteutil.ToBytes32(b) + pk, err := blst.SecretKeyFromBytes(b32[:]) + require.NoError(t, err) + pk2, err := blst.SecretKeyFromBytes(b32[:]) + require.NoError(t, err) + assert.Equal(t, pk.Marshal(), pk2.Marshal(), "Keys not equal") +} + +func TestSecretKeyFromBytes(t *testing.T) { + tests := []struct { + name string + input []byte + err error + }{ + { + name: "Nil", + err: errors.New("secret key must be 32 bytes"), + }, + { + name: "Empty", + input: []byte{}, + err: errors.New("secret key must be 32 bytes"), + }, + { + name: "Short", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("secret key must be 32 bytes"), + }, + { + name: "Long", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("secret key must be 32 bytes"), + }, + { + name: "Bad", + input: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + err: common.ErrSecretUnmarshal, + }, + { + name: "Good", + input: []byte{0x25, 0x29, 0x5f, 0x0d, 0x1d, 0x59, 0x2a, 0x90, 0xb3, 0x33, 0xe2, 0x6e, 0x85, 0x14, 0x97, 0x08, 0x20, 0x8e, 0x9f, 0x8e, 0x8b, 0xc1, 0x8f, 0x6c, 0x77, 0xbd, 0x62, 0xf8, 0xad, 0x7a, 0x68, 0x66}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := blst.SecretKeyFromBytes(test.input) + if test.err != nil { + assert.NotEqual(t, nil, err, "No error returned") + assert.Contains(t, err.Error(), test.err.Error(), "Unexpected error returned") + } else { + assert.NoError(t, err) + assert.Equal(t, 0, bytes.Compare(res.Marshal(), test.input)) + } + }) + } +} + +func TestSerialize(t *testing.T) { + rk, err := blst.RandKey() + require.NoError(t, err) + b := rk.Marshal() + + _, err = blst.SecretKeyFromBytes(b) + assert.NoError(t, err) +} + +func TestZeroKey(t *testing.T) { + // Is Zero + var zKey [32]byte + assert.Equal(t, true, blst.IsZero(zKey[:])) + + // Is Not Zero + _, err := rand.Read(zKey[:]) + 
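+	// A 32-byte key filled with random bytes should no longer register as the zero key.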
assert.NoError(t, err) + assert.Equal(t, false, blst.IsZero(zKey[:])) +} diff --git a/crypto/bls/blst/signature.go b/crypto/bls/blst/signature.go new file mode 100644 index 0000000000..6467b065ab --- /dev/null +++ b/crypto/bls/blst/signature.go @@ -0,0 +1,269 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import ( + "bytes" + "fmt" + "github.com/ethereum/go-ethereum/params" + "sync" + + "github.com/ethereum/go-ethereum/crypto/bls/common" + "github.com/ethereum/go-ethereum/crypto/rand" + "github.com/pkg/errors" + blst "github.com/supranational/blst/bindings/go" +) + +var dst = []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_") + +const scalarBytes = 32 +const randBitsEntropy = 64 + +// Signature used in the BLS signature scheme. +type Signature struct { + s *blstSignature +} + +// SignatureFromBytes creates a BLS signature from a LittleEndian byte slice. +func SignatureFromBytes(sig []byte) (common.Signature, error) { + if len(sig) != params.BLSSignatureLength { + return nil, fmt.Errorf("signature must be %d bytes", params.BLSSignatureLength) + } + signature := new(blstSignature).Uncompress(sig) + if signature == nil { + return nil, errors.New("could not unmarshal bytes into signature") + } + // Group check signature. Do not check for infinity since an aggregated signature + // could be infinite. + if !signature.SigValidate(false) { + return nil, errors.New("signature not in group") + } + return &Signature{s: signature}, nil +} + +// AggregateCompressedSignatures converts a list of compressed signatures into a single, aggregated sig. +func AggregateCompressedSignatures(multiSigs [][]byte) (common.Signature, error) { + signature := new(blstAggregateSignature) + valid := signature.AggregateCompressed(multiSigs, true) + if !valid { + return nil, errors.New("provided signatures fail the group check and cannot be compressed") + } + return &Signature{s: signature.ToAffine()}, nil +} + +// MultipleSignaturesFromBytes creates a group of BLS signatures from a LittleEndian 2d-byte slice. +func MultipleSignaturesFromBytes(multiSigs [][]byte) ([]common.Signature, error) { + if len(multiSigs) == 0 { + return nil, fmt.Errorf("0 signatures provided to the method") + } + for _, s := range multiSigs { + if len(s) != params.BLSSignatureLength { + return nil, fmt.Errorf("signature must be %d bytes", params.BLSSignatureLength) + } + } + multiSignatures := new(blstSignature).BatchUncompress(multiSigs) + if len(multiSignatures) == 0 { + return nil, errors.New("could not unmarshal bytes into signature") + } + if len(multiSignatures) != len(multiSigs) { + return nil, errors.Errorf("wanted %d decompressed signatures but got %d", len(multiSigs), len(multiSignatures)) + } + wrappedSigs := make([]common.Signature, len(multiSignatures)) + for i, signature := range multiSignatures { + // Group check signature. Do not check for infinity since an aggregated signature + // could be infinite. + if !signature.SigValidate(false) { + return nil, errors.New("signature not in group") + } + copiedSig := signature + wrappedSigs[i] = &Signature{s: copiedSig} + } + return wrappedSigs, nil +} + +// Verify a bls signature given a public key, a message. +// +// In IETF draft BLS specification: +// Verify(PK, message, signature) -> VALID or INVALID: a verification +// +// algorithm that outputs VALID if signature is a valid signature of +// message under public key PK, and INVALID otherwise. 
+// +// In the Ethereum proof of stake specification: +// def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool +func (s *Signature) Verify(pubKey common.PublicKey, msg []byte) bool { + // Signature and PKs are assumed to have been validated upon decompression! + return s.s.Verify(false, pubKey.(*PublicKey).p, false, msg, dst) +} + +// AggregateVerify verifies each public key against its respective message. This is vulnerable to +// rogue public-key attack. Each user must provide a proof-of-knowledge of the public key. +// +// Note: The msgs must be distinct. For maximum performance, this method does not ensure distinct +// messages. +// +// In IETF draft BLS specification: +// AggregateVerify((PK_1, message_1), ..., (PK_n, message_n), +// +// signature) -> VALID or INVALID: an aggregate verification +// algorithm that outputs VALID if signature is a valid aggregated +// signature for a collection of public keys and messages, and +// outputs INVALID otherwise. +// +// In the Ethereum proof of stake specification: +// def AggregateVerify(pairs: Sequence[PK: BLSPubkey, message: Bytes], signature: BLSSignature) -> bool +// +// Deprecated: Use FastAggregateVerify or use this method in spectests only. +func (s *Signature) AggregateVerify(pubKeys []common.PublicKey, msgs [][32]byte) bool { + size := len(pubKeys) + if size == 0 { + return false + } + if size != len(msgs) { + return false + } + msgSlices := make([][]byte, len(msgs)) + rawKeys := make([]*blstPublicKey, len(msgs)) + for i := 0; i < size; i++ { + msgSlices[i] = msgs[i][:] + rawKeys[i] = pubKeys[i].(*PublicKey).p + } + // Signature and PKs are assumed to have been validated upon decompression! + return s.s.AggregateVerify(false, rawKeys, false, msgSlices, dst) +} + +// FastAggregateVerify verifies all the provided public keys with their aggregated signature. +// +// In IETF draft BLS specification: +// FastAggregateVerify(PK_1, ..., PK_n, message, signature) -> VALID +// +// or INVALID: a verification algorithm for the aggregate of multiple +// signatures on the same message. This function is faster than +// AggregateVerify. +// +// In the Ethereum proof of stake specification: +// def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool +func (s *Signature) FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte) bool { + if len(pubKeys) == 0 { + return false + } + rawKeys := make([]*blstPublicKey, len(pubKeys)) + for i := 0; i < len(pubKeys); i++ { + rawKeys[i] = pubKeys[i].(*PublicKey).p + } + return s.s.FastAggregateVerify(true, rawKeys, msg[:], dst) +} + +// Eth2FastAggregateVerify implements a wrapper on top of bls's FastAggregateVerify. It accepts G2_POINT_AT_INFINITY signature +// when pubkeys empty. +// +// Spec code: +// def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: +// +// """ +// Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. +// """ +// if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: +// return True +// return bls.FastAggregateVerify(pubkeys, message, signature) +func (s *Signature) Eth2FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte) bool { + if len(pubKeys) == 0 && bytes.Equal(s.Marshal(), common.InfiniteSignature[:]) { + return true + } + return s.FastAggregateVerify(pubKeys, msg) +} + +// NewAggregateSignature creates a blank aggregate signature. 
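+// The result is a well-formed signature over a fixed mock message; real aggregates
+// are built with AggregateSignatures. A minimal sketch of the aggregate-then-verify
+// flow, mirroring the tests in this package (RandKey, AggregateSignatures and
+// FastAggregateVerify are the helpers defined here):
+//
+//	msg := [32]byte{'h', 'i'}
+//	k1, _ := RandKey()
+//	k2, _ := RandKey()
+//	agg := AggregateSignatures([]common.Signature{k1.Sign(msg[:]), k2.Sign(msg[:])})
+//	ok := agg.FastAggregateVerify([]common.PublicKey{k1.PublicKey(), k2.PublicKey()}, msg) // ok == true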
+func NewAggregateSignature() common.Signature { + sig := blst.HashToG2([]byte{'m', 'o', 'c', 'k'}, dst).ToAffine() + return &Signature{s: sig} +} + +// AggregateSignatures converts a list of signatures into a single, aggregated sig. +func AggregateSignatures(sigs []common.Signature) common.Signature { + if len(sigs) == 0 { + return nil + } + + rawSigs := make([]*blstSignature, len(sigs)) + for i := 0; i < len(sigs); i++ { + rawSigs[i] = sigs[i].(*Signature).s + } + + // Signature and PKs are assumed to have been validated upon decompression! + signature := new(blstAggregateSignature) + signature.Aggregate(rawSigs, false) + return &Signature{s: signature.ToAffine()} +} + +// VerifySignature verifies a single signature using public key and message. +func VerifySignature(sig []byte, msg [32]byte, pubKey common.PublicKey) (bool, error) { + rSig, err := SignatureFromBytes(sig) + if err != nil { + return false, err + } + return rSig.Verify(pubKey, msg[:]), nil +} + +// VerifyMultipleSignatures verifies a non-singular set of signatures and its respective pubkeys and messages. +// This method provides a safe way to verify multiple signatures at once. We pick a number randomly from 1 to max +// uint64 and then multiply the signature by it. We continue doing this for all signatures and its respective pubkeys. +// S* = S_1 * r_1 + S_2 * r_2 + ... + S_n * r_n +// P'_{i,j} = P_{i,j} * r_i +// e(S*, G) = \prod_{i=1}^n \prod_{j=1}^{m_i} e(P'_{i,j}, M_{i,j}) +// Using this we can verify multiple signatures safely. +func VerifyMultipleSignatures(sigs [][]byte, msgs [][32]byte, pubKeys []common.PublicKey) (bool, error) { + if len(sigs) == 0 || len(pubKeys) == 0 { + return false, nil + } + rawSigs := new(blstSignature).BatchUncompress(sigs) + + length := len(sigs) + if length != len(pubKeys) || length != len(msgs) { + return false, errors.Errorf("provided signatures, pubkeys and messages have differing lengths. S: %d, P: %d,M %d", + length, len(pubKeys), len(msgs)) + } + mulP1Aff := make([]*blstPublicKey, length) + rawMsgs := make([]blst.Message, length) + + for i := 0; i < length; i++ { + mulP1Aff[i] = pubKeys[i].(*PublicKey).p + rawMsgs[i] = msgs[i][:] + } + // Secure source of RNG + randGen := rand.NewGenerator() + randLock := new(sync.Mutex) + + randFunc := func(scalar *blst.Scalar) { + var rbytes [scalarBytes]byte + randLock.Lock() + randGen.Read(rbytes[:]) // #nosec G104 -- Error will always be nil in `read` in math/rand + randLock.Unlock() + // Protect against the generator returning 0. Since the scalar value is + // derived from a big endian byte slice, we take the last byte. + rbytes[len(rbytes)-1] |= 0x01 + scalar.FromBEndian(rbytes[:]) + } + dummySig := new(blstSignature) + + // Validate signatures since we uncompress them here. Public keys should already be validated. + return dummySig.MultipleAggregateVerify(rawSigs, true, mulP1Aff, false, rawMsgs, dst, randFunc, randBitsEntropy), nil +} + +// Marshal a signature into a LittleEndian byte slice. +func (s *Signature) Marshal() []byte { + return s.s.Compress() +} + +// Copy returns a full deep copy of a signature. +func (s *Signature) Copy() common.Signature { + sign := *s.s + return &Signature{s: &sign} +} + +// VerifyCompressed verifies that the compressed signature and pubkey +// are valid from the message provided. 
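+//
+// A minimal sketch, mirroring the tests in this package:
+//
+//	priv, _ := RandKey()
+//	msg := []byte("hello")
+//	ok := VerifyCompressed(priv.Sign(msg).Marshal(), priv.PublicKey().Marshal(), msg) // ok == true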
+func VerifyCompressed(signature, pub, msg []byte) bool { + // Validate signature and PKs since we will uncompress them here + return new(blstSignature).VerifyCompressed(signature, true, pub, true, msg, dst) +} diff --git a/crypto/bls/blst/signature_test.go b/crypto/bls/blst/signature_test.go new file mode 100644 index 0000000000..39dbb63fa9 --- /dev/null +++ b/crypto/bls/blst/signature_test.go @@ -0,0 +1,309 @@ +//go:build ((linux && amd64) || (linux && arm64) || (darwin && amd64) || (darwin && arm64) || (windows && amd64)) && !blst_disabled + +package blst + +import ( + "bytes" + "errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func TestSignVerify(t *testing.T) { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + msg := []byte("hello") + sig := priv.Sign(msg) + assert.Equal(t, true, sig.Verify(pub, msg), "Signature did not verify") +} + +func TestAggregateVerify(t *testing.T) { + pubkeys := make([]common.PublicKey, 0, 100) + sigs := make([]common.Signature, 0, 100) + var msgs [][32]byte + for i := 0; i < 100; i++ { + msg := [32]byte{'h', 'e', 'l', 'l', 'o', byte(i)} + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + sig := priv.Sign(msg[:]) + pubkeys = append(pubkeys, pub) + sigs = append(sigs, sig) + msgs = append(msgs, msg) + } + aggSig := AggregateSignatures(sigs) + // skipcq: GO-W1009 + assert.Equal(t, true, aggSig.AggregateVerify(pubkeys, msgs), "Signature did not verify") +} + +func TestAggregateVerify_CompressedSignatures(t *testing.T) { + pubkeys := make([]common.PublicKey, 0, 100) + sigs := make([]common.Signature, 0, 100) + var sigBytes [][]byte + var msgs [][32]byte + for i := 0; i < 100; i++ { + msg := [32]byte{'h', 'e', 'l', 'l', 'o', byte(i)} + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + sig := priv.Sign(msg[:]) + pubkeys = append(pubkeys, pub) + sigs = append(sigs, sig) + sigBytes = append(sigBytes, sig.Marshal()) + msgs = append(msgs, msg) + } + aggSig := AggregateSignatures(sigs) + // skipcq: GO-W1009 + assert.Equal(t, true, aggSig.AggregateVerify(pubkeys, msgs), "Signature did not verify") + + aggSig2, err := AggregateCompressedSignatures(sigBytes) + assert.NoError(t, err) + assert.Equal(t, aggSig.Marshal(), aggSig2.Marshal(), "Signature did not match up") +} + +func TestFastAggregateVerify(t *testing.T) { + pubkeys := make([]common.PublicKey, 0, 100) + sigs := make([]common.Signature, 0, 100) + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + for i := 0; i < 100; i++ { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + sig := priv.Sign(msg[:]) + pubkeys = append(pubkeys, pub) + sigs = append(sigs, sig) + } + aggSig := AggregateSignatures(sigs) + assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify") + +} + +func TestVerifyCompressed(t *testing.T) { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + msg := []byte("hello") + sig := priv.Sign(msg) + assert.Equal(t, true, sig.Verify(pub, msg), "Non compressed signature did not verify") + assert.Equal(t, true, VerifyCompressed(sig.Marshal(), pub.Marshal(), msg), "Compressed signatures and pubkeys did not verify") +} + +func TestVerifySingleSignature_InvalidSignature(t *testing.T) { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + msgA := [32]byte{'h', 'e', 'l', 'l', 'o'} + msgB := [32]byte{'o', 'l', 'l', 'e', 
'h'} + sigA := priv.Sign(msgA[:]).Marshal() + valid, err := VerifySignature(sigA, msgB, pub) + assert.NoError(t, err) + assert.Equal(t, false, valid, "Signature did verify") +} + +func TestVerifySingleSignature_ValidSignature(t *testing.T) { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + sig := priv.Sign(msg[:]).Marshal() + valid, err := VerifySignature(sig, msg, pub) + assert.NoError(t, err) + assert.Equal(t, true, valid, "Signature did not verify") +} + +func TestMultipleSignatureVerification(t *testing.T) { + pubkeys := make([]common.PublicKey, 0, 100) + sigs := make([][]byte, 0, 100) + var msgs [][32]byte + for i := 0; i < 100; i++ { + msg := [32]byte{'h', 'e', 'l', 'l', 'o', byte(i)} + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + sig := priv.Sign(msg[:]).Marshal() + pubkeys = append(pubkeys, pub) + sigs = append(sigs, sig) + msgs = append(msgs, msg) + } + verify, err := VerifyMultipleSignatures(sigs, msgs, pubkeys) + assert.NoError(t, err, "Signature did not verify") + assert.Equal(t, true, verify, "Signature did not verify") +} + +func TestFastAggregateVerify_ReturnsFalseOnEmptyPubKeyList(t *testing.T) { + var pubkeys []common.PublicKey + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + + aggSig := NewAggregateSignature() + assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, msg), "Expected FastAggregateVerify to return false with empty input ") +} + +func TestEth2FastAggregateVerify(t *testing.T) { + pubkeys := make([]common.PublicKey, 0, 100) + sigs := make([]common.Signature, 0, 100) + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + for i := 0; i < 100; i++ { + priv, err := RandKey() + require.NoError(t, err) + pub := priv.PublicKey() + sig := priv.Sign(msg[:]) + pubkeys = append(pubkeys, pub) + sigs = append(sigs, sig) + } + aggSig := AggregateSignatures(sigs) + assert.Equal(t, true, aggSig.Eth2FastAggregateVerify(pubkeys, msg), "Signature did not verify") + +} + +func TestEth2FastAggregateVerify_ReturnsFalseOnEmptyPubKeyList(t *testing.T) { + var pubkeys []common.PublicKey + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + + aggSig := NewAggregateSignature() + assert.Equal(t, false, aggSig.Eth2FastAggregateVerify(pubkeys, msg), "Expected Eth2FastAggregateVerify to return false with empty input ") +} + +func TestEth2FastAggregateVerify_ReturnsTrueOnG2PointAtInfinity(t *testing.T) { + var pubkeys []common.PublicKey + msg := [32]byte{'h', 'e', 'l', 'l', 'o'} + + g2PointAtInfinity := append([]byte{0xC0}, make([]byte, 95)...) 
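+	// 0xC0 followed by 95 zero bytes is the 96-byte compressed encoding of the
+	// G2 point at infinity (compression and infinity flag bits set).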
+ aggSig, err := SignatureFromBytes(g2PointAtInfinity) + require.NoError(t, err) + assert.Equal(t, true, aggSig.Eth2FastAggregateVerify(pubkeys, msg)) +} + +func TestSignatureFromBytes(t *testing.T) { + tests := []struct { + name string + input []byte + err error + }{ + { + name: "Nil", + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Empty", + input: []byte{}, + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Short", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Long", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Bad", + input: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + err: errors.New("could not unmarshal bytes into signature"), + }, + { + name: "Good", + input: []byte{0xab, 0xb0, 0x12, 0x4c, 0x75, 0x74, 0xf2, 0x81, 0xa2, 0x93, 0xf4, 0x18, 0x5c, 0xad, 0x3c, 0xb2, 0x26, 0x81, 0xd5, 0x20, 0x91, 0x7c, 0xe4, 0x66, 0x65, 0x24, 0x3e, 0xac, 0xb0, 0x51, 0x00, 0x0d, 0x8b, 0xac, 0xf7, 0x5e, 0x14, 0x51, 0x87, 0x0c, 0xa6, 0xb3, 0xb9, 0xe6, 0xc9, 0xd4, 0x1a, 0x7b, 0x02, 0xea, 0xd2, 0x68, 0x5a, 0x84, 0x18, 0x8a, 0x4f, 0xaf, 0xd3, 0x82, 0x5d, 0xaf, 0x6a, 0x98, 0x96, 0x25, 0xd7, 0x19, 0xcc, 0xd2, 0xd8, 0x3a, 0x40, 0x10, 0x1f, 0x4a, 0x45, 0x3f, 0xca, 0x62, 0x87, 0x8c, 0x89, 0x0e, 0xca, 0x62, 0x23, 0x63, 0xf9, 0xdd, 0xb8, 0xf3, 0x67, 0xa9, 0x1e, 0x84}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := SignatureFromBytes(test.input) + if test.err != nil { + assert.NotEqual(t, nil, err, "No error returned") + assert.ErrorContains(t, test.err, err.Error(), "Unexpected error returned") + } else { + assert.NoError(t, err) + assert.Equal(t, 0, bytes.Compare(res.Marshal(), test.input)) + } + }) + } +} + +func TestMultipleSignatureFromBytes(t *testing.T) { + tests := []struct { + name string + 
input [][]byte + err error + }{ + { + name: "Nil", + err: errors.New("0 signatures provided to the method"), + }, + { + name: "Empty", + input: [][]byte{}, + err: errors.New("0 signatures provided to the method"), + }, + { + name: "Short", + input: [][]byte{{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Long", + input: [][]byte{{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + err: errors.New("signature must be 96 bytes"), + }, + { + name: "Bad", + input: [][]byte{{0x8f, 0xc0, 0xb4, 0x9e, 0x2e, 0xac, 0x50, 0x86, 0xe2, 0xe2, 0xaa, 0xf, 0xdc, 0x54, 0x23, 0x51, 0x6, 0xd8, 0x29, 0xf5, 0xae, 0x3, 0x5d, 0xb8, 0x31, 0x4d, 0x26, 0x3, 0x48, 0x18, 0xb9, 0x1f, 0x6b, 0xd7, 0x86, 0xb4, 0xa2, 0x69, 0xc7, 0xe7, 0xf5, 0xc0, 0x93, 0x19, 0x6e, 0xfd, 0x33, 0xb8, 0x1, 0xe1, 0x1f, 0x4e, 0xb4, 0xb1, 0xa0, 0x1, 0x30, 0x48, 0x8a, 0x6c, 0x97, 0x29, 0xd6, 0xcb, 0x1c, 0x45, 0xef, 0x87, 0xba, 0x4f, 0xce, 0x22, 0x84, 0x48, 0xad, 0x16, 0xf7, 0x5c, 0xb2, 0xa8, 0x34, 0xb9, 0xee, 0xb8, 0xbf, 0xe5, 0x58, 0x2c, 0x44, 0x7b, 0x1f, 0x9c, 0x22, 0x26, 0x3a, 0x22}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + err: errors.New("could not unmarshal bytes into signature"), + }, + { + name: "Good", + input: [][]byte{ + {0xab, 0xb0, 0x12, 0x4c, 0x75, 0x74, 0xf2, 0x81, 0xa2, 0x93, 0xf4, 0x18, 0x5c, 0xad, 0x3c, 0xb2, 0x26, 0x81, 0xd5, 0x20, 0x91, 0x7c, 0xe4, 0x66, 0x65, 0x24, 0x3e, 0xac, 0xb0, 0x51, 0x00, 0x0d, 0x8b, 0xac, 0xf7, 0x5e, 0x14, 0x51, 0x87, 0x0c, 0xa6, 0xb3, 0xb9, 0xe6, 0xc9, 0xd4, 0x1a, 0x7b, 0x02, 0xea, 0xd2, 0x68, 0x5a, 0x84, 0x18, 0x8a, 0x4f, 0xaf, 0xd3, 0x82, 0x5d, 0xaf, 0x6a, 0x98, 0x96, 0x25, 0xd7, 0x19, 0xcc, 0xd2, 0xd8, 0x3a, 0x40, 0x10, 0x1f, 0x4a, 0x45, 0x3f, 0xca, 0x62, 0x87, 0x8c, 0x89, 0x0e, 0xca, 0x62, 0x23, 0x63, 0xf9, 0xdd, 0xb8, 0xf3, 0x67, 0xa9, 0x1e, 0x84}, + {0xb7, 0x86, 0xe5, 0x7, 0x43, 0xe2, 0x53, 0x6c, 0x15, 0x51, 0x9c, 0x6, 0x2a, 0xa7, 0xe5, 0x12, 0xf9, 0xb7, 0x77, 
0x93, 0x3f, 0x55, 0xb3, 0xaf, 0x38, 0xf7, 0x39, 0xe4, 0x84, 0x6d, 0x88, 0x44, 0x52, 0x77, 0x65, 0x42, 0x95, 0xd9, 0x79, 0x93, 0x7e, 0xc8, 0x12, 0x60, 0xe3, 0x24, 0xea, 0x8, 0x10, 0x52, 0xcd, 0xd2, 0x7f, 0x5d, 0x25, 0x3a, 0xa8, 0x9b, 0xb7, 0x65, 0xa9, 0x31, 0xea, 0x7c, 0x85, 0x13, 0x53, 0xc0, 0xa3, 0x88, 0xd1, 0xa5, 0x54, 0x85, 0x2, 0x2d, 0xf8, 0xa1, 0xd7, 0xc1, 0x60, 0x58, 0x93, 0xec, 0x7c, 0xf9, 0x33, 0x43, 0x4, 0x48, 0x40, 0x97, 0xef, 0x67, 0x2a, 0x27}, + {0xb2, 0x12, 0xd0, 0xec, 0x46, 0x76, 0x6b, 0x24, 0x71, 0x91, 0x2e, 0xa8, 0x53, 0x9a, 0x48, 0xa3, 0x78, 0x30, 0xc, 0xe8, 0xf0, 0x86, 0xa3, 0x68, 0xec, 0xe8, 0x96, 0x43, 0x34, 0xda, 0xf, 0xf4, 0x65, 0x48, 0xbb, 0xe0, 0x92, 0xa1, 0x8, 0x12, 0x18, 0x46, 0xe6, 0x4a, 0xd6, 0x92, 0x88, 0xe, 0x2, 0xf5, 0xf3, 0x2a, 0x96, 0xb1, 0x4, 0xf1, 0x11, 0xa9, 0x92, 0x79, 0x52, 0x0, 0x64, 0x34, 0xeb, 0x25, 0xe, 0xf4, 0x29, 0x6b, 0x39, 0x4e, 0x28, 0x78, 0xfe, 0x25, 0xa3, 0xc0, 0x88, 0x5a, 0x40, 0xfd, 0x71, 0x37, 0x63, 0x79, 0xcd, 0x6b, 0x56, 0xda, 0xee, 0x91, 0x26, 0x72, 0xfc, 0xbc}, + {0x8f, 0xc0, 0xb4, 0x9e, 0x2e, 0xac, 0x50, 0x86, 0xe2, 0xe2, 0xaa, 0xf, 0xdc, 0x54, 0x23, 0x51, 0x6, 0xd8, 0x29, 0xf5, 0xae, 0x3, 0x5d, 0xb8, 0x31, 0x4d, 0x26, 0x3, 0x48, 0x18, 0xb9, 0x1f, 0x6b, 0xd7, 0x86, 0xb4, 0xa2, 0x69, 0xc7, 0xe7, 0xf5, 0xc0, 0x93, 0x19, 0x6e, 0xfd, 0x33, 0xb8, 0x1, 0xe1, 0x1f, 0x4e, 0xb4, 0xb1, 0xa0, 0x1, 0x30, 0x48, 0x8a, 0x6c, 0x97, 0x29, 0xd6, 0xcb, 0x1c, 0x45, 0xef, 0x87, 0xba, 0x4f, 0xce, 0x22, 0x84, 0x48, 0xad, 0x16, 0xf7, 0x5c, 0xb2, 0xa8, 0x34, 0xb9, 0xee, 0xb8, 0xbf, 0xe5, 0x58, 0x2c, 0x44, 0x7b, 0x1f, 0x9c, 0x22, 0x26, 0x3a, 0x22}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := MultipleSignaturesFromBytes(test.input) + if test.err != nil { + assert.NotEqual(t, nil, err, "No error returned") + assert.ErrorContains(t, test.err, err.Error(), "Unexpected error returned") + } else { + assert.NoError(t, err) + for i, s := range res { + assert.Equal(t, 0, bytes.Compare(s.Marshal(), test.input[i])) + } + } + }) + } +} + +func TestCopy(t *testing.T) { + priv, err := RandKey() + require.NoError(t, err) + key, ok := priv.(*bls12SecretKey) + require.Equal(t, true, ok) + + signatureA := &Signature{s: new(blstSignature).Sign(key.p, []byte("foo"), dst)} + signatureB, ok := signatureA.Copy().(*Signature) + require.Equal(t, true, ok) + + if signatureA == signatureB { + t.Fatalf("%#v expected not equal to %#v", signatureA, signatureB) + } + + if signatureA.s == signatureB.s { + t.Fatalf("%#v expected not equal to %#v", signatureA.s, signatureB.s) + } + assert.Equal(t, signatureA, signatureB) + + signatureA.s.Sign(key.p, []byte("bar"), dst) + assert.NotEqual(t, signatureA, signatureB) +} diff --git a/crypto/bls/blst/stub.go b/crypto/bls/blst/stub.go new file mode 100644 index 0000000000..a75e7e7338 --- /dev/null +++ b/crypto/bls/blst/stub.go @@ -0,0 +1,154 @@ +//go:build blst_disabled + +package blst + +import ( + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +// This stub file exists until build issues can be resolved for libfuzz. 
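+// It is compiled only when the blst_disabled build tag is set; every function
+// below simply panics if it is ever called.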
+const err = "blst is only supported on linux,darwin,windows" + +// SecretKey -- stub +type SecretKey struct{} + +// PublicKey -- stub +func (s SecretKey) PublicKey() common.PublicKey { + panic(err) +} + +// Sign -- stub +func (s SecretKey) Sign(_ []byte) common.Signature { + panic(err) +} + +// Marshal -- stub +func (s SecretKey) Marshal() []byte { + panic(err) +} + +// IsZero -- stub +func (s SecretKey) IsZero() bool { + panic(err) +} + +// PublicKey -- stub +type PublicKey struct{} + +// Marshal -- stub +func (p PublicKey) Marshal() []byte { + panic(err) +} + +// Copy -- stub +func (p PublicKey) Copy() common.PublicKey { + panic(err) +} + +// Aggregate -- stub +func (p PublicKey) Aggregate(_ common.PublicKey) common.PublicKey { + panic(err) +} + +// IsInfinite -- stub +func (p PublicKey) IsInfinite() bool { + panic(err) +} + +// Equals -- stub +func (p PublicKey) Equals(_ common.PublicKey) bool { + panic(err) +} + +// Signature -- stub +type Signature struct{} + +// Verify -- stub +func (s Signature) Verify(_ common.PublicKey, _ []byte) bool { + panic(err) +} + +// AggregateVerify -- stub +func (s Signature) AggregateVerify(_ []common.PublicKey, _ [][32]byte) bool { + panic(err) +} + +// FastAggregateVerify -- stub +func (s Signature) FastAggregateVerify(_ []common.PublicKey, _ [32]byte) bool { + panic(err) +} + +// Eth2FastAggregateVerify -- stub +func (s Signature) Eth2FastAggregateVerify(_ []common.PublicKey, _ [32]byte) bool { + panic(err) +} + +// Marshal -- stub +func (s Signature) Marshal() []byte { + panic(err) +} + +// Copy -- stub +func (s Signature) Copy() common.Signature { + panic(err) +} + +// SecretKeyFromBytes -- stub +func SecretKeyFromBytes(_ []byte) (SecretKey, error) { + panic(err) +} + +// PublicKeyFromBytes -- stub +func PublicKeyFromBytes(_ []byte) (PublicKey, error) { + panic(err) +} + +// SignatureFromBytes -- stub +func SignatureFromBytes(_ []byte) (Signature, error) { + panic(err) +} + +// MultipleSignaturesFromBytes -- stub +func MultipleSignaturesFromBytes(multiSigs [][]byte) ([]common.Signature, error) { + panic(err) +} + +// AggregatePublicKeys -- stub +func AggregatePublicKeys(_ [][]byte) (PublicKey, error) { + panic(err) +} + +// AggregateSignatures -- stub +func AggregateSignatures(_ []common.Signature) common.Signature { + panic(err) +} + +// AggregateMultiplePubkeys -- stub +func AggregateMultiplePubkeys(pubs []common.PublicKey) common.PublicKey { + panic(err) +} + +// AggregateCompressedSignatures -- stub +func AggregateCompressedSignatures(multiSigs [][]byte) (common.Signature, error) { + panic(err) +} + +// VerifyMultipleSignatures -- stub +func VerifyMultipleSignatures(_ [][]byte, _ [][32]byte, _ []common.PublicKey) (bool, error) { + panic(err) +} + +// NewAggregateSignature -- stub +func NewAggregateSignature() common.Signature { + panic(err) +} + +// RandKey -- stub +func RandKey() (common.SecretKey, error) { + panic(err) +} + +// VerifyCompressed -- stub +func VerifyCompressed(_, _, _ []byte) bool { + panic(err) +} diff --git a/crypto/bls/common/constants.go b/crypto/bls/common/constants.go new file mode 100644 index 0000000000..e9e171fb23 --- /dev/null +++ b/crypto/bls/common/constants.go @@ -0,0 +1,12 @@ +package common + +import "github.com/ethereum/go-ethereum/params" + +// ZeroSecretKey represents a zero secret key. +var ZeroSecretKey = [32]byte{} + +// InfinitePublicKey represents an infinite public key (G1 Point at Infinity). 
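+// The leading 0xC0 byte carries the compression and infinity flag bits of the
+// BLS12-381 serialization format; all remaining bytes are zero.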
+var InfinitePublicKey = [params.BLSPubkeyLength]byte{0xC0} + +// InfiniteSignature represents an infinite signature (G2 Point at Infinity). +var InfiniteSignature = [96]byte{0xC0} diff --git a/crypto/bls/common/error.go b/crypto/bls/common/error.go new file mode 100644 index 0000000000..13cc48eaaa --- /dev/null +++ b/crypto/bls/common/error.go @@ -0,0 +1,13 @@ +package common + +import "errors" + +// ErrZeroKey describes an error due to a zero secret key. +var ErrZeroKey = errors.New("received secret key is zero") + +// ErrSecretUnmarshal describes an error which happens during unmarshalling +// a secret key. +var ErrSecretUnmarshal = errors.New("could not unmarshal bytes into secret key") + +// ErrInfinitePubKey describes an error due to an infinite public key. +var ErrInfinitePubKey = errors.New("received an infinite public key") diff --git a/crypto/bls/common/interface.go b/crypto/bls/common/interface.go new file mode 100644 index 0000000000..db2c770111 --- /dev/null +++ b/crypto/bls/common/interface.go @@ -0,0 +1,33 @@ +// Package common provides the BLS interfaces that are implemented by the various BLS wrappers. +// +// This package should not be used by downstream consumers. These interfaces are re-exporter by +// github.com/ethereum/go-ethereum/crypto/bls. This package exists to prevent an import circular +// dependency. +package common + +// SecretKey represents a BLS secret or private key. +type SecretKey interface { + PublicKey() PublicKey + Sign(msg []byte) Signature + Marshal() []byte +} + +// PublicKey represents a BLS public key. +type PublicKey interface { + Marshal() []byte + Copy() PublicKey + Aggregate(p2 PublicKey) PublicKey + IsInfinite() bool + Equals(p2 PublicKey) bool +} + +// Signature represents a BLS signature. +type Signature interface { + Verify(pubKey PublicKey, msg []byte) bool + // Deprecated: Use FastAggregateVerify or use this method in spectests only. + AggregateVerify(pubKeys []PublicKey, msgs [][32]byte) bool + FastAggregateVerify(pubKeys []PublicKey, msg [32]byte) bool + Eth2FastAggregateVerify(pubKeys []PublicKey, msg [32]byte) bool + Marshal() []byte + Copy() Signature +} diff --git a/crypto/bls/common/mock/interface_mock.go b/crypto/bls/common/mock/interface_mock.go new file mode 100644 index 0000000000..060f635e19 --- /dev/null +++ b/crypto/bls/common/mock/interface_mock.go @@ -0,0 +1,277 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: crypto/bls/common/interface.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + common "github.com/ethereum/go-ethereum/crypto/bls/common" + gomock "github.com/golang/mock/gomock" +) + +// MockSecretKey is a mock of SecretKey interface. +type MockSecretKey struct { + ctrl *gomock.Controller + recorder *MockSecretKeyMockRecorder +} + +// MockSecretKeyMockRecorder is the mock recorder for MockSecretKey. +type MockSecretKeyMockRecorder struct { + mock *MockSecretKey +} + +// NewMockSecretKey creates a new mock instance. +func NewMockSecretKey(ctrl *gomock.Controller) *MockSecretKey { + mock := &MockSecretKey{ctrl: ctrl} + mock.recorder = &MockSecretKeyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSecretKey) EXPECT() *MockSecretKeyMockRecorder { + return m.recorder +} + +// Marshal mocks base method. 
+func (m *MockSecretKey) Marshal() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Marshal") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Marshal indicates an expected call of Marshal. +func (mr *MockSecretKeyMockRecorder) Marshal() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockSecretKey)(nil).Marshal)) +} + +// PublicKey mocks base method. +func (m *MockSecretKey) PublicKey() common.PublicKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(common.PublicKey) + return ret0 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockSecretKeyMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockSecretKey)(nil).PublicKey)) +} + +// Sign mocks base method. +func (m *MockSecretKey) Sign(msg []byte) common.Signature { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sign", msg) + ret0, _ := ret[0].(common.Signature) + return ret0 +} + +// Sign indicates an expected call of Sign. +func (mr *MockSecretKeyMockRecorder) Sign(msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockSecretKey)(nil).Sign), msg) +} + +// MockPublicKey is a mock of PublicKey interface. +type MockPublicKey struct { + ctrl *gomock.Controller + recorder *MockPublicKeyMockRecorder +} + +// MockPublicKeyMockRecorder is the mock recorder for MockPublicKey. +type MockPublicKeyMockRecorder struct { + mock *MockPublicKey +} + +// NewMockPublicKey creates a new mock instance. +func NewMockPublicKey(ctrl *gomock.Controller) *MockPublicKey { + mock := &MockPublicKey{ctrl: ctrl} + mock.recorder = &MockPublicKeyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPublicKey) EXPECT() *MockPublicKeyMockRecorder { + return m.recorder +} + +// Aggregate mocks base method. +func (m *MockPublicKey) Aggregate(p2 common.PublicKey) common.PublicKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Aggregate", p2) + ret0, _ := ret[0].(common.PublicKey) + return ret0 +} + +// Aggregate indicates an expected call of Aggregate. +func (mr *MockPublicKeyMockRecorder) Aggregate(p2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aggregate", reflect.TypeOf((*MockPublicKey)(nil).Aggregate), p2) +} + +// Copy mocks base method. +func (m *MockPublicKey) Copy() common.PublicKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Copy") + ret0, _ := ret[0].(common.PublicKey) + return ret0 +} + +// Copy indicates an expected call of Copy. +func (mr *MockPublicKeyMockRecorder) Copy() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Copy", reflect.TypeOf((*MockPublicKey)(nil).Copy)) +} + +// Equals mocks base method. +func (m *MockPublicKey) Equals(p2 common.PublicKey) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Equals", p2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Equals indicates an expected call of Equals. +func (mr *MockPublicKeyMockRecorder) Equals(p2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equals", reflect.TypeOf((*MockPublicKey)(nil).Equals), p2) +} + +// IsInfinite mocks base method. 
+func (m *MockPublicKey) IsInfinite() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsInfinite") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsInfinite indicates an expected call of IsInfinite. +func (mr *MockPublicKeyMockRecorder) IsInfinite() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInfinite", reflect.TypeOf((*MockPublicKey)(nil).IsInfinite)) +} + +// Marshal mocks base method. +func (m *MockPublicKey) Marshal() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Marshal") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Marshal indicates an expected call of Marshal. +func (mr *MockPublicKeyMockRecorder) Marshal() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockPublicKey)(nil).Marshal)) +} + +// MockSignature is a mock of Signature interface. +type MockSignature struct { + ctrl *gomock.Controller + recorder *MockSignatureMockRecorder +} + +// MockSignatureMockRecorder is the mock recorder for MockSignature. +type MockSignatureMockRecorder struct { + mock *MockSignature +} + +// NewMockSignature creates a new mock instance. +func NewMockSignature(ctrl *gomock.Controller) *MockSignature { + mock := &MockSignature{ctrl: ctrl} + mock.recorder = &MockSignatureMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSignature) EXPECT() *MockSignatureMockRecorder { + return m.recorder +} + +// AggregateVerify mocks base method. +func (m *MockSignature) AggregateVerify(pubKeys []common.PublicKey, msgs [][32]byte) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AggregateVerify", pubKeys, msgs) + ret0, _ := ret[0].(bool) + return ret0 +} + +// AggregateVerify indicates an expected call of AggregateVerify. +func (mr *MockSignatureMockRecorder) AggregateVerify(pubKeys, msgs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateVerify", reflect.TypeOf((*MockSignature)(nil).AggregateVerify), pubKeys, msgs) +} + +// Copy mocks base method. +func (m *MockSignature) Copy() common.Signature { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Copy") + ret0, _ := ret[0].(common.Signature) + return ret0 +} + +// Copy indicates an expected call of Copy. +func (mr *MockSignatureMockRecorder) Copy() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Copy", reflect.TypeOf((*MockSignature)(nil).Copy)) +} + +// Eth2FastAggregateVerify mocks base method. +func (m *MockSignature) Eth2FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Eth2FastAggregateVerify", pubKeys, msg) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Eth2FastAggregateVerify indicates an expected call of Eth2FastAggregateVerify. +func (mr *MockSignatureMockRecorder) Eth2FastAggregateVerify(pubKeys, msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Eth2FastAggregateVerify", reflect.TypeOf((*MockSignature)(nil).Eth2FastAggregateVerify), pubKeys, msg) +} + +// FastAggregateVerify mocks base method. 
+func (m *MockSignature) FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FastAggregateVerify", pubKeys, msg) + ret0, _ := ret[0].(bool) + return ret0 +} + +// FastAggregateVerify indicates an expected call of FastAggregateVerify. +func (mr *MockSignatureMockRecorder) FastAggregateVerify(pubKeys, msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FastAggregateVerify", reflect.TypeOf((*MockSignature)(nil).FastAggregateVerify), pubKeys, msg) +} + +// Marshal mocks base method. +func (m *MockSignature) Marshal() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Marshal") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Marshal indicates an expected call of Marshal. +func (mr *MockSignatureMockRecorder) Marshal() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockSignature)(nil).Marshal)) +} + +// Verify mocks base method. +func (m *MockSignature) Verify(pubKey common.PublicKey, msg []byte) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", pubKey, msg) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Verify indicates an expected call of Verify. +func (mr *MockSignatureMockRecorder) Verify(pubKey, msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockSignature)(nil).Verify), pubKey, msg) +} diff --git a/crypto/bls/constants.go b/crypto/bls/constants.go new file mode 100644 index 0000000000..f66aa5517a --- /dev/null +++ b/crypto/bls/constants.go @@ -0,0 +1,7 @@ +package bls + +// DomainByteLength length of domain byte array. +const DomainByteLength = 4 + +// CurveOrder for the BLS12-381 curve. +const CurveOrder = "52435875175126190479447740508185965837690552500527637822603658699938581184513" diff --git a/crypto/bls/error.go b/crypto/bls/error.go new file mode 100644 index 0000000000..4875668c6d --- /dev/null +++ b/crypto/bls/error.go @@ -0,0 +1 @@ +package bls diff --git a/crypto/bls/herumi/init.go b/crypto/bls/herumi/init.go new file mode 100644 index 0000000000..9afa76ab5e --- /dev/null +++ b/crypto/bls/herumi/init.go @@ -0,0 +1,16 @@ +package herumi + +import "github.com/herumi/bls-eth-go-binary/bls" + +// HerumiInit allows the required curve orders and appropriate sub-groups to be initialized. +func HerumiInit() { + if err := bls.Init(bls.BLS12_381); err != nil { + panic(err) + } + if err := bls.SetETHmode(bls.EthModeDraft07); err != nil { + panic(err) + } + // Check subgroup order for pubkeys and signatures. + bls.VerifyPublicKeyOrder(true) + bls.VerifySignatureOrder(true) +} diff --git a/crypto/bls/interface.go b/crypto/bls/interface.go new file mode 100644 index 0000000000..913ba0529e --- /dev/null +++ b/crypto/bls/interface.go @@ -0,0 +1,14 @@ +package bls + +import ( + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +// PublicKey represents a BLS public key. +type PublicKey = common.PublicKey + +// SecretKey represents a BLS secret or private key. +type SecretKey = common.SecretKey + +// Signature represents a BLS signature. 
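+// Like PublicKey and SecretKey above, this alias lets callers depend on
+// crypto/bls without importing the internal common package directly.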
+type Signature = common.Signature diff --git a/crypto/bls/signature_batch.go b/crypto/bls/signature_batch.go new file mode 100644 index 0000000000..526d0c9430 --- /dev/null +++ b/crypto/bls/signature_batch.go @@ -0,0 +1,204 @@ +package bls + +import ( + "encoding/hex" + "fmt" + + "github.com/pkg/errors" +) + +// AggregatedSignature represents aggregated signature produced by AggregateBatch() +const AggregatedSignature = "bls aggregated signature" + +// SignatureBatch refers to the defined set of +// signatures and its respective public keys and +// messages required to verify it. +type SignatureBatch struct { + Signatures [][]byte + PublicKeys []PublicKey + Messages [][32]byte + Descriptions []string +} + +// NewSet constructs an empty signature batch object. +func NewSet() *SignatureBatch { + return &SignatureBatch{ + Signatures: [][]byte{}, + PublicKeys: []PublicKey{}, + Messages: [][32]byte{}, + Descriptions: []string{}, + } +} + +// Join merges the provided signature batch to out current one. +func (s *SignatureBatch) Join(set *SignatureBatch) *SignatureBatch { + s.Signatures = append(s.Signatures, set.Signatures...) + s.PublicKeys = append(s.PublicKeys, set.PublicKeys...) + s.Messages = append(s.Messages, set.Messages...) + s.Descriptions = append(s.Descriptions, set.Descriptions...) + return s +} + +// Verify the current signature batch using the batch verify algorithm. +func (s *SignatureBatch) Verify() (bool, error) { + return VerifyMultipleSignatures(s.Signatures, s.Messages, s.PublicKeys) +} + +// VerifyVerbosely verifies signatures as a whole at first, if fails, fallback +// to verify each single signature to identify invalid ones. +func (s *SignatureBatch) VerifyVerbosely() (bool, error) { + valid, err := s.Verify() + if err != nil || valid { + return valid, err + } + + // if signature batch is invalid, we then verify signatures one by one. + + errmsg := "some signatures are invalid. details:" + for i := 0; i < len(s.Signatures); i++ { + sig := s.Signatures[i] + msg := s.Messages[i] + pubKey := s.PublicKeys[i] + + valid, err := VerifySignature(sig, msg, pubKey) + if !valid { + desc := s.Descriptions[i] + if err != nil { + errmsg += fmt.Sprintf("\nsignature '%s' is invalid."+ + " signature: 0x%s, public key: 0x%s, message: 0x%v, error: %v", + desc, hex.EncodeToString(sig), hex.EncodeToString(pubKey.Marshal()), + hex.EncodeToString(msg[:]), err) + } else { + errmsg += fmt.Sprintf("\nsignature '%s' is invalid."+ + " signature: 0x%s, public key: 0x%s, message: 0x%v", + desc, hex.EncodeToString(sig), hex.EncodeToString(pubKey.Marshal()), + hex.EncodeToString(msg[:])) + } + } + } + + return false, errors.Errorf(errmsg) +} + +// Copy the attached signature batch and return it +// to the caller. +func (s *SignatureBatch) Copy() *SignatureBatch { + signatures := make([][]byte, len(s.Signatures)) + pubkeys := make([]PublicKey, len(s.PublicKeys)) + messages := make([][32]byte, len(s.Messages)) + descriptions := make([]string, len(s.Descriptions)) + for i := range s.Signatures { + sig := make([]byte, len(s.Signatures[i])) + copy(sig, s.Signatures[i]) + signatures[i] = sig + } + for i := range s.PublicKeys { + pubkeys[i] = s.PublicKeys[i].Copy() + } + for i := range s.Messages { + copy(messages[i][:], s.Messages[i][:]) + } + copy(descriptions, s.Descriptions) + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubkeys, + Messages: messages, + Descriptions: descriptions, + } +} + +// RemoveDuplicates removes duplicate signature sets from the signature batch. 
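+// Two entries count as duplicates only when signature, public key and message
+// all match; the returned int is the number of entries dropped.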
+func (s *SignatureBatch) RemoveDuplicates() (int, *SignatureBatch, error) { + if len(s.Signatures) == 0 || len(s.PublicKeys) == 0 || len(s.Messages) == 0 { + return 0, s, nil + } + if len(s.Signatures) != len(s.PublicKeys) || len(s.Signatures) != len(s.Messages) { + return 0, s, errors.Errorf("mismatch number of signatures, publickeys and messages in signature batch. "+ + "Signatures %d, Public Keys %d , Messages %d", s.Signatures, s.PublicKeys, s.Messages) + } + sigMap := make(map[string]int) + duplicateSet := make(map[int]bool) + for i := 0; i < len(s.Signatures); i++ { + if sigIdx, ok := sigMap[string(s.Signatures[i])]; ok { + if s.PublicKeys[sigIdx].Equals(s.PublicKeys[i]) && + s.Messages[sigIdx] == s.Messages[i] { + duplicateSet[i] = true + continue + } + } + sigMap[string(s.Signatures[i])] = i + } + + sigs := s.Signatures[:0] + pubs := s.PublicKeys[:0] + msgs := s.Messages[:0] + descs := s.Descriptions[:0] + + for i := 0; i < len(s.Signatures); i++ { + if duplicateSet[i] { + continue + } + sigs = append(sigs, s.Signatures[i]) + pubs = append(pubs, s.PublicKeys[i]) + msgs = append(msgs, s.Messages[i]) + descs = append(descs, s.Descriptions[i]) + } + + s.Signatures = sigs + s.PublicKeys = pubs + s.Messages = msgs + s.Descriptions = descs + + return len(duplicateSet), s, nil +} + +// AggregateBatch aggregates common messages in the provided batch to +// reduce the number of pairings required when we finally verify the +// whole batch. +func (s *SignatureBatch) AggregateBatch() (*SignatureBatch, error) { + if len(s.Signatures) != len(s.PublicKeys) || len(s.Signatures) != len(s.Messages) || len(s.Signatures) != len(s.Descriptions) { + return s, errors.Errorf("mismatch number of signatures, publickeys, messages and descriptions in signature batch. 
"+ + "Signatures %d, Public Keys %d , Messages %d, Descriptions %d", len(s.Signatures), len(s.PublicKeys), len(s.Messages), len(s.Descriptions)) + } + if len(s.Signatures) == 0 { + return s, nil + } + msgMap := make(map[[32]byte]*SignatureBatch) + + for i := 0; i < len(s.Messages); i++ { + currMsg := s.Messages[i] + currBatch, ok := msgMap[currMsg] + if ok { + currBatch.Signatures = append(currBatch.Signatures, s.Signatures[i]) + currBatch.Messages = append(currBatch.Messages, s.Messages[i]) + currBatch.PublicKeys = append(currBatch.PublicKeys, s.PublicKeys[i]) + currBatch.Descriptions = append(currBatch.Descriptions, s.Descriptions[i]) + continue + } + currBatch = &SignatureBatch{ + Signatures: [][]byte{s.Signatures[i]}, + Messages: [][32]byte{s.Messages[i]}, + PublicKeys: []PublicKey{s.PublicKeys[i]}, + Descriptions: []string{s.Descriptions[i]}, + } + msgMap[currMsg] = currBatch + } + newSt := NewSet() + for rt, b := range msgMap { + if len(b.PublicKeys) > 1 { + aggPub := AggregateMultiplePubkeys(b.PublicKeys) + aggSig, err := AggregateCompressedSignatures(b.Signatures) + if err != nil { + return nil, err + } + copiedRt := rt + b.PublicKeys = []PublicKey{aggPub} + b.Signatures = [][]byte{aggSig.Marshal()} + b.Messages = [][32]byte{copiedRt} + b.Descriptions = []string{AggregatedSignature} + } + newObj := *b + newSt = newSt.Join(&newObj) + } + return newSt, nil +} diff --git a/crypto/bls/signature_batch_test.go b/crypto/bls/signature_batch_test.go new file mode 100644 index 0000000000..3caccd936e --- /dev/null +++ b/crypto/bls/signature_batch_test.go @@ -0,0 +1,746 @@ +package bls + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +const TestSignature = "test signature" + +func TestCopySignatureSet(t *testing.T) { + t.Run("blst", func(t *testing.T) { + key, err := RandKey() + assert.NoError(t, err) + key2, err := RandKey() + assert.NoError(t, err) + key3, err := RandKey() + assert.NoError(t, err) + + message := [32]byte{'C', 'D'} + message2 := [32]byte{'E', 'F'} + message3 := [32]byte{'H', 'I'} + + sig := key.Sign(message[:]) + sig2 := key2.Sign(message2[:]) + sig3 := key3.Sign(message3[:]) + + set := &SignatureBatch{ + Signatures: [][]byte{sig.Marshal()}, + PublicKeys: []PublicKey{key.PublicKey()}, + Messages: [][32]byte{message}, + Descriptions: createDescriptions(1), + } + set2 := &SignatureBatch{ + Signatures: [][]byte{sig2.Marshal()}, + PublicKeys: []PublicKey{key.PublicKey()}, + Messages: [][32]byte{message}, + Descriptions: createDescriptions(1), + } + set3 := &SignatureBatch{ + Signatures: [][]byte{sig3.Marshal()}, + PublicKeys: []PublicKey{key.PublicKey()}, + Messages: [][32]byte{message}, + Descriptions: createDescriptions(1), + } + aggSet := set.Join(set2).Join(set3) + aggSet2 := aggSet.Copy() + + assert.Equal(t, aggSet, aggSet2) + }) +} + +func TestVerifyVerbosely_AllSignaturesValid(t *testing.T) { + set := NewValidSignatureSet(t, "good", 3) + valid, err := set.VerifyVerbosely() + assert.NoError(t, err) + assert.Equal(t, true, valid, "SignatureSet is expected to be valid") +} + +func TestVerifyVerbosely_SomeSignaturesInvalid(t *testing.T) { + goodSet := NewValidSignatureSet(t, "good", 3) + badSet := NewInvalidSignatureSet(t, "bad", 3, false) + set := NewSet().Join(goodSet).Join(badSet) + valid, err := set.VerifyVerbosely() + assert.Equal(t, false, valid, "SignatureSet is expected to be invalid") + assert.Contains(t, 
err.Error(), "signature 'signature of bad0' is invalid") + assert.Contains(t, err.Error(), "signature 'signature of bad1' is invalid") + assert.Contains(t, err.Error(), "signature 'signature of bad2' is invalid") + assert.NotContains(t, err.Error(), "signature 'signature of good0' is invalid") + assert.NotContains(t, err.Error(), "signature 'signature of good1' is invalid") + assert.NotContains(t, err.Error(), "signature 'signature of good2' is invalid") +} + +func TestVerifyVerbosely_VerificationThrowsError(t *testing.T) { + goodSet := NewValidSignatureSet(t, "good", 1) + badSet := NewInvalidSignatureSet(t, "bad", 1, true) + set := NewSet().Join(goodSet).Join(badSet) + valid, err := set.VerifyVerbosely() + assert.Equal(t, false, valid, "SignatureSet is expected to be invalid") + assert.Contains(t, err.Error(), "signature 'signature of bad0' is invalid") + assert.Contains(t, err.Error(), "error: could not unmarshal bytes into signature") + assert.NotContains(t, err.Error(), "signature 'signature of good0' is invalid") +} + +func TestSignatureBatch_RemoveDuplicates(t *testing.T) { + var keys []SecretKey + for i := 0; i < 100; i++ { + key, err := RandKey() + assert.NoError(t, err) + keys = append(keys, key) + } + tests := []struct { + name string + batchCreator func() (input *SignatureBatch, output *SignatureBatch) + want int + }{ + { + name: "empty batch", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + return &SignatureBatch{}, &SignatureBatch{} + }, + want: 0, + }, + { + name: "valid duplicates in batch", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:20] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + allSigs := append(signatures, signatures...) + allPubs := append(pubs, pubs...) + allMsgs := append(messages, messages...) + return &SignatureBatch{ + Signatures: allSigs, + PublicKeys: allPubs, + Messages: allMsgs, + Descriptions: createDescriptions(len(allMsgs)), + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(allMsgs)), + } + }, + want: 20, + }, + { + name: "valid duplicates in batch with multiple messages", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + allSigs := append(signatures, signatures...) + allPubs := append(pubs, pubs...) + allMsgs := append(messages, messages...) 
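+				// Every entry now appears exactly twice, so 30 duplicates are expected to be removed.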
+ return &SignatureBatch{ + Signatures: allSigs, + PublicKeys: allPubs, + Messages: allMsgs, + Descriptions: createDescriptions(len(allMsgs)), + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(allMsgs)), + } + }, + want: 30, + }, + { + name: "no duplicates in batch with multiple messages", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + } + }, + want: 0, + }, + { + name: "valid duplicates and invalid duplicates in batch with multiple messages", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + allSigs := append(signatures, signatures...) + // Make it a non-unique entry + allSigs[10] = make([]byte, 96) + allPubs := append(pubs, pubs...) + allMsgs := append(messages, messages...) 
+ // Insert it back at the end + signatures = append(signatures, signatures[10]) + pubs = append(pubs, pubs[10]) + messages = append(messages, messages[10]) + // Zero out to expected result + signatures[10] = make([]byte, 96) + return &SignatureBatch{ + Signatures: allSigs, + PublicKeys: allPubs, + Messages: allMsgs, + Descriptions: createDescriptions(len(allMsgs)), + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(allMsgs)), + } + }, + want: 29, + }, + { + name: "valid duplicates and invalid duplicates with signature,pubkey,message in batch with multiple messages", + batchCreator: func() (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + allSigs := append(signatures, signatures...) + // Make it a non-unique entry + allSigs[10] = make([]byte, 96) + + allPubs := append(pubs, pubs...) + allPubs[20] = keys[len(keys)-1].PublicKey() + + allMsgs := append(messages, messages...) + allMsgs[29] = [32]byte{'j', 'u', 'n', 'k'} + + // Insert it back at the end + signatures = append(signatures, signatures[10]) + pubs = append(pubs, pubs[10]) + messages = append(messages, messages[10]) + // Zero out to expected result + signatures[10] = make([]byte, 96) + + // Insert it back at the end + signatures = append(signatures, signatures[20]) + pubs = append(pubs, pubs[20]) + messages = append(messages, messages[20]) + // Zero out to expected result + pubs[20] = keys[len(keys)-1].PublicKey() + + // Insert it back at the end + signatures = append(signatures, signatures[29]) + pubs = append(pubs, pubs[29]) + messages = append(messages, messages[29]) + messages[29] = [32]byte{'j', 'u', 'n', 'k'} + + return &SignatureBatch{ + Signatures: allSigs, + PublicKeys: allPubs, + Messages: allMsgs, + Descriptions: createDescriptions(len(allMsgs)), + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + } + }, + want: 27, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + input, output := tt.batchCreator() + num, res, err := input.RemoveDuplicates() + assert.NoError(t, err) + if num != tt.want { + t.Errorf("RemoveDuplicates() got = %v, want %v", num, tt.want) + } + if !reflect.DeepEqual(res.Signatures, output.Signatures) { + t.Errorf("RemoveDuplicates() Signatures output = %v, want %v", res.Signatures, output.Signatures) + } + if !reflect.DeepEqual(res.PublicKeys, output.PublicKeys) { + t.Errorf("RemoveDuplicates() Publickeys output = %v, want %v", res.PublicKeys, output.PublicKeys) + } + if !reflect.DeepEqual(res.Messages, output.Messages) { + t.Errorf("RemoveDuplicates() Messages output = %v, want %v", res.Messages, output.Messages) + } + }) + } 
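// The table above pins down the expected RemoveDuplicates behaviour; a minimal usage
// sketch, mirroring the call made in the test loop and assuming only what the cases assert:
//
//	num, deduped, err := batch.RemoveDuplicates()
//	if err != nil {
//		// handle error
//	}
//	// num is how many duplicate (signature, public key, message) entries were dropped;
//	// deduped keeps a single copy of each unique entry.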
+} + +func TestSignatureBatch_AggregateBatch(t *testing.T) { + var keys []SecretKey + for i := 0; i < 100; i++ { + key, err := RandKey() + assert.NoError(t, err) + keys = append(keys, key) + } + tests := []struct { + name string + batchCreator func(t *testing.T) (input *SignatureBatch, output *SignatureBatch) + wantErr bool + }{ + { + name: "empty batch", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + return &SignatureBatch{Signatures: nil, Messages: nil, PublicKeys: nil, Descriptions: nil}, + &SignatureBatch{Signatures: nil, Messages: nil, PublicKeys: nil, Descriptions: nil} + }, + wantErr: false, + }, + { + name: "mismatch number of signatures and messages in batch", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + key1 := keys[0] + key2 := keys[1] + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + sig1 := key1.Sign(msg[:]) + sig2 := key2.Sign(msg[:]) + signatures := [][]byte{sig1.Marshal(), sig2.Marshal()} + pubs := []common.PublicKey{key1.PublicKey(), key2.PublicKey()} + messages := [][32]byte{msg} + descs := createDescriptions(2) + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: descs, + }, &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: descs, + } + }, + wantErr: true, + }, + { + name: "valid signatures in batch", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:20] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + aggSig, err := AggregateCompressedSignatures(signatures) + assert.NoError(t, err) + aggPub := AggregateMultiplePubkeys(pubs) + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + }, &SignatureBatch{ + Signatures: [][]byte{aggSig.Marshal()}, + PublicKeys: []PublicKey{aggPub}, + Messages: [][32]byte{msg}, + Descriptions: createDescriptions(1, AggregatedSignature), + } + }, + wantErr: false, + }, + { + name: "invalid signatures in batch", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:20] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + signatures[10] = make([]byte, 96) + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + }, nil + }, + wantErr: true, + }, + { + name: "valid aggregates in batch with multiple messages", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := 
range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + aggSig1, err := AggregateCompressedSignatures(signatures[:10]) + assert.NoError(t, err) + aggSig2, err := AggregateCompressedSignatures(signatures[10:20]) + assert.NoError(t, err) + aggSig3, err := AggregateCompressedSignatures(signatures[20:30]) + assert.NoError(t, err) + aggPub1 := AggregateMultiplePubkeys(pubs[:10]) + aggPub2 := AggregateMultiplePubkeys(pubs[10:20]) + aggPub3 := AggregateMultiplePubkeys(pubs[20:30]) + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + }, &SignatureBatch{ + Signatures: [][]byte{aggSig1.Marshal(), aggSig2.Marshal(), aggSig3.Marshal()}, + PublicKeys: []PublicKey{aggPub1, aggPub2, aggPub3}, + Messages: [][32]byte{msg, msg1, msg2}, + Descriptions: createDescriptions(3, AggregatedSignature), + } + }, + wantErr: false, + }, + { + name: "common and uncommon messages in batch with multiple messages", + batchCreator: func(t *testing.T) (*SignatureBatch, *SignatureBatch) { + chosenKeys := keys[:30] + + msg := [32]byte{'r', 'a', 'n', 'd', 'o', 'm'} + msg1 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '1'} + msg2 := [32]byte{'r', 'a', 'n', 'd', 'o', 'm', '2'} + var signatures [][]byte + var messages [][32]byte + var pubs []PublicKey + for _, k := range chosenKeys[:10] { + s := k.Sign(msg[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[10:20] { + s := k.Sign(msg1[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg1) + pubs = append(pubs, k.PublicKey()) + } + for _, k := range chosenKeys[20:30] { + s := k.Sign(msg2[:]) + signatures = append(signatures, s.Marshal()) + messages = append(messages, msg2) + pubs = append(pubs, k.PublicKey()) + } + // Set a custom message + messages[5][31] ^= byte(100) + messages[15][31] ^= byte(100) + messages[25][31] ^= byte(100) + + var newSigs [][]byte + newSigs = append(newSigs, signatures[:5]...) + newSigs = append(newSigs, signatures[6:10]...) + + aggSig1, err := AggregateCompressedSignatures(newSigs) + assert.NoError(t, err) + + newSigs = [][]byte{} + newSigs = append(newSigs, signatures[10:15]...) + newSigs = append(newSigs, signatures[16:20]...) + aggSig2, err := AggregateCompressedSignatures(newSigs) + assert.NoError(t, err) + + newSigs = [][]byte{} + newSigs = append(newSigs, signatures[20:25]...) + newSigs = append(newSigs, signatures[26:30]...) + aggSig3, err := AggregateCompressedSignatures(newSigs) + assert.NoError(t, err) + + var newPubs []PublicKey + newPubs = append(newPubs, pubs[:5]...) + newPubs = append(newPubs, pubs[6:10]...) + + aggPub1 := AggregateMultiplePubkeys(newPubs) + + newPubs = []PublicKey{} + newPubs = append(newPubs, pubs[10:15]...) + newPubs = append(newPubs, pubs[16:20]...) + aggPub2 := AggregateMultiplePubkeys(newPubs) + + newPubs = []PublicKey{} + newPubs = append(newPubs, pubs[20:25]...) + newPubs = append(newPubs, pubs[26:30]...) 
+ aggPub3 := AggregateMultiplePubkeys(newPubs) + + return &SignatureBatch{ + Signatures: signatures, + PublicKeys: pubs, + Messages: messages, + Descriptions: createDescriptions(len(messages)), + }, &SignatureBatch{ + Signatures: [][]byte{aggSig1.Marshal(), signatures[5], aggSig2.Marshal(), signatures[15], aggSig3.Marshal(), signatures[25]}, + PublicKeys: []PublicKey{aggPub1, pubs[5], aggPub2, pubs[15], aggPub3, pubs[25]}, + Messages: [][32]byte{msg, messages[5], msg1, messages[15], msg2, messages[25]}, + Descriptions: []string{AggregatedSignature, TestSignature, AggregatedSignature, TestSignature, AggregatedSignature, TestSignature}, + } + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + input, output := tt.batchCreator(t) + got, err := input.AggregateBatch() + if (err != nil) != tt.wantErr { + t.Errorf("AggregateBatch() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + got = sortSet(got) + output = sortSet(output) + + if !reflect.DeepEqual(got.Signatures, output.Signatures) { + t.Errorf("AggregateBatch() Signatures got = %v, want %v", got.Signatures, output.Signatures) + } + if !reflect.DeepEqual(got.PublicKeys, output.PublicKeys) { + t.Errorf("AggregateBatch() PublicKeys got = %v, want %v", got.PublicKeys, output.PublicKeys) + } + if !reflect.DeepEqual(got.Messages, output.Messages) { + t.Errorf("AggregateBatch() Messages got = %v, want %v", got.Messages, output.Messages) + } + if !reflect.DeepEqual(got.Descriptions, output.Descriptions) { + t.Errorf("AggregateBatch() Descriptions got = %v, want %v", got.Descriptions, output.Descriptions) + } + }) + } +} + +func NewValidSignatureSet(t *testing.T, msgBody string, num int) *SignatureBatch { + set := &SignatureBatch{ + Signatures: make([][]byte, num), + PublicKeys: make([]common.PublicKey, num), + Messages: make([][32]byte, num), + Descriptions: make([]string, num), + } + + for i := 0; i < num; i++ { + priv, err := RandKey() + require.NoError(t, err) + pubkey := priv.PublicKey() + msg := messageBytes(fmt.Sprintf("%s%d", msgBody, i)) + sig := priv.Sign(msg[:]).Marshal() + desc := fmt.Sprintf("signature of %s%d", msgBody, i) + + set.Signatures[i] = sig + set.PublicKeys[i] = pubkey + set.Messages[i] = msg + set.Descriptions[i] = desc + } + + return set +} + +func NewInvalidSignatureSet(t *testing.T, msgBody string, num int, throwErr bool) *SignatureBatch { + set := &SignatureBatch{ + Signatures: make([][]byte, num), + PublicKeys: make([]common.PublicKey, num), + Messages: make([][32]byte, num), + Descriptions: make([]string, num), + } + + for i := 0; i < num; i++ { + priv, err := RandKey() + require.NoError(t, err) + pubkey := priv.PublicKey() + msg := messageBytes(fmt.Sprintf("%s%d", msgBody, i)) + var sig []byte + if throwErr { + sig = make([]byte, 96) + } else { + badMsg := messageBytes("badmsg") + sig = priv.Sign(badMsg[:]).Marshal() + } + desc := fmt.Sprintf("signature of %s%d", msgBody, i) + + set.Signatures[i] = sig + set.PublicKeys[i] = pubkey + set.Messages[i] = msg + set.Descriptions[i] = desc + } + + return set +} + +func messageBytes(message string) [32]byte { + var bytes [32]byte + copy(bytes[:], []byte(message)) + return bytes +} + +func createDescriptions(length int, text ...string) []string { + desc := make([]string, length) + for i := range desc { + if len(text) > 0 { + desc[i] = text[0] + } else { + desc[i] = TestSignature + } + } + return desc +} + +func sortSet(s *SignatureBatch) *SignatureBatch { + sort.Sort(sorter{set: s}) + return s 
+} + +type sorter struct { + set *SignatureBatch +} + +func (s sorter) Len() int { + return len(s.set.Messages) +} + +func (s sorter) Swap(i, j int) { + s.set.Signatures[i], s.set.Signatures[j] = s.set.Signatures[j], s.set.Signatures[i] + s.set.PublicKeys[i], s.set.PublicKeys[j] = s.set.PublicKeys[j], s.set.PublicKeys[i] + s.set.Messages[i], s.set.Messages[j] = s.set.Messages[j], s.set.Messages[i] + s.set.Descriptions[i], s.set.Descriptions[j] = s.set.Descriptions[j], s.set.Descriptions[i] +} + +func (s sorter) Less(i, j int) bool { + return bytes.Compare(s.set.Messages[i][:], s.set.Messages[j][:]) == -1 +} diff --git a/crypto/rand/rand.go b/crypto/rand/rand.go new file mode 100644 index 0000000000..10360559b0 --- /dev/null +++ b/crypto/rand/rand.go @@ -0,0 +1,86 @@ +/* +Package rand defines methods of obtaining random number generators. + +One is expected to use randomness from this package only, without introducing any other packages. +This limits the scope of code that needs to be hardened. + +There are two modes, one for deterministic and another non-deterministic randomness: +1. If deterministic pseudo-random generator is enough, use: + + import "github.com/ethereum/go-ethereum/crypto/rand" + randGen := rand.NewDeterministicGenerator() + randGen.Intn(32) // or any other func defined in math.rand API + + In this mode, only seed is generated using cryptographically secure source (crypto/rand). So, + once seed is obtained, and generator is seeded, the next generations are deterministic, thus fast. + However given that we only seed this 63 bits from crypto/rand and use math/rand to generate the outputs, + this method is not cryptographically secure. This is directly stated in the math/rand package, + https://github.com/golang/go/blob/release-branch.go1.17/src/math/rand/rand.go#L15. For any security + sensitive work this particular generator is NOT to be used. + +2. For cryptographically secure non-deterministic mode (CSPRNG), use: + + import "github.com/ethereum/go-ethereum/crypto/rand" + randGen := rand.NewGenerator() + randGen.Intn(32) // or any other func defined in math.rand API + + Again, any of the functions from `math/rand` can be used, however, they all use custom source + of randomness (crypto/rand), on every step. This makes randomness non-deterministic. However, + you take a performance hit -- as it is an order of magnitude slower. +*/ +package rand + +import ( + "crypto/rand" + "encoding/binary" + mrand "math/rand" + "sync" +) + +type source struct{} + +var lock sync.RWMutex +var _ mrand.Source64 = (*source)(nil) // #nosec G404 -- This ensures we meet the interface + +// Seed does nothing when crypto/rand is used as source. +func (_ *source) Seed(_ int64) {} + +// Int63 returns uniformly-distributed random (as in CSPRNG) int64 value within [0, 1<<63) range. +// Panics if random generator reader cannot return data. +func (s *source) Int63() int64 { + return int64(s.Uint64() & ^uint64(1<<63)) +} + +// Uint64 returns uniformly-distributed random (as in CSPRNG) uint64 value within [0, 1<<64) range. +// Panics if random generator reader cannot return data. +func (_ *source) Uint64() (val uint64) { + lock.RLock() + defer lock.RUnlock() + if err := binary.Read(rand.Reader, binary.BigEndian, &val); err != nil { + panic(err) + } + return +} + +// Rand is alias for underlying random generator. +type Rand = mrand.Rand // #nosec G404 + +// NewGenerator returns a new generator that uses random values from crypto/rand as a source +// (cryptographically secure random number generator). 
+// Panics if crypto/rand input cannot be read. +// Use it for everything where crypto secure non-deterministic randomness is required. Performance +// takes a hit, so use sparingly. +func NewGenerator() *Rand { + return mrand.New(&source{}) // #nosec G404 -- excluded +} + +// NewDeterministicGenerator returns a random generator which is only seeded with crypto/rand, +// but is deterministic otherwise (given seed, produces given results, deterministically). +// Panics if crypto/rand input cannot be read. +// Use this method for performance, where deterministic pseudo-random behaviour is enough. +// Otherwise, rely on NewGenerator(). This method is not cryptographically secure as outputs +// can be potentially predicted even without knowledge of the underlying seed. +func NewDeterministicGenerator() *Rand { + randGen := NewGenerator() + return mrand.New(mrand.NewSource(randGen.Int63())) // #nosec G404 -- excluded +} diff --git a/crypto/rand/rand_test.go b/crypto/rand/rand_test.go new file mode 100644 index 0000000000..599006def2 --- /dev/null +++ b/crypto/rand/rand_test.go @@ -0,0 +1,24 @@ +package rand + +import ( + "math/rand" + "testing" +) + +func TestNewGenerator(_ *testing.T) { + // Make sure that generation works, no panics. + randGen := NewGenerator() + _ = randGen.Int63() + _ = randGen.Uint64() + _ = randGen.Intn(32) + var _ = rand.Source64(randGen) +} + +func TestNewDeterministicGenerator(_ *testing.T) { + // Make sure that generation works, no panics. + randGen := NewDeterministicGenerator() + _ = randGen.Int63() + _ = randGen.Uint64() + _ = randGen.Intn(32) + var _ = rand.Source64(randGen) +} diff --git a/docker/chainnode/Dockerfile b/docker/chainnode/Dockerfile deleted file mode 100644 index 7596bc452a..0000000000 --- a/docker/chainnode/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Geth in a stock Go builder container -FROM golang:1.17.0-alpine3.13 as builder - -RUN apk add --no-cache make gcc musl-dev linux-headers git - -COPY . 
/opt -RUN cd /opt && make ronin - -# Pull Geth into a second stage deploy alpine container -FROM alpine:3.13 - -RUN apk add --no-cache ca-certificates -WORKDIR "/opt" - -ENV PASSWORD '' -ENV PRIVATE_KEY '' -ENV BOOTNODES '' -ENV VERBOSITY 3 -ENV SYNC_MODE 'snap' -ENV NETWORK_ID '2021' -ENV ETHSTATS_ENDPOINT '' -ENV RPC_NODE '' -ENV NODEKEY '' -ENV FORCE_INIT 'true' -ENV RONIN_PARAMS '' -ENV INIT_FORCE_OVERRIDE_CHAIN_CONFIG 'false' - -COPY --from=builder /opt/build/bin/ronin /usr/local/bin/ronin -COPY --from=builder /opt/genesis/ ./ -COPY --from=builder /opt/docker/chainnode/entrypoint.sh ./ - -EXPOSE 7000 6060 8545 8546 30303 30303/udp - -ENTRYPOINT ["./entrypoint.sh"] diff --git a/docker/chainnode/docker-compose.yaml b/docker/chainnode/docker-compose.yaml deleted file mode 100644 index c6d342dfae..0000000000 --- a/docker/chainnode/docker-compose.yaml +++ /dev/null @@ -1,33 +0,0 @@ -version: "3" -services: - node: - build: - context: ../../ - dockerfile: docker/chainnode/Dockerfile - restart: always - hostname: node - container_name: node - ports: - - "8545:8545" - - "8546:8546" - - "30303:30303" - - "30303:30303/udp" - volumes: - - ~/.skymavis/ronin:/opt/ronin - environment: - - SYNC_MODE=${SYNC_MODE} - - BOOTNODES=${BOOTNODES} - - NETWORK_ID=${NETWORK_ID} - - GENESIS_PATH=${GENESIS_PATH} - - RPC_NODE=${RPC_NODE} - - DATA_DIR=${DATA_DIR} - - SUBSCRIBER=${SUBSCRIBER} - - KAFKA_URL=${KAFKA_URL} - - KAFKA_USERNAME=${KAFKA_USERNAME} - - KAFKA_PASSWORD=${KAFKA_PASSWORD} - - KAFKA_AUTHENTICATION_TYPE=${KAFKA_AUTHENTICATION_TYPE} - - VERBOSITY=${VERBOSITY} - - CONFIRM_BLOCK_AT=${CONFIRM_BLOCK_AT} - - RONIN_PARAMS=${RONIN_PARAMS} - - MINE=${MINE} - network_mode: bridge diff --git a/docker/chainnode/entrypoint.sh b/docker/chainnode/entrypoint.sh index b48e49c359..2a3faa8e83 100755 --- a/docker/chainnode/entrypoint.sh +++ b/docker/chainnode/entrypoint.sh @@ -14,12 +14,15 @@ datadir="/ronin/data" KEYSTORE_DIR="/ronin/keystore" PASSWORD_FILE="/ronin/password" +BLS_PASSWORD_FILE="/ronin/bls_password" +BLS_PRIVATE_KEY_DIR="/ronin/bls_keystore" # variables genesisPath="" params="" syncmode="snap" mine="true" +blsParams="" set -e @@ -68,12 +71,28 @@ if [[ ! -f $PASSWORD_FILE ]]; then if [[ ! -z $PASSWORD ]]; then echo "Password env is set. Writing into file." echo "$PASSWORD" > $PASSWORD_FILE + unset PASSWORD else echo "No password set (or empty), generating a new one" $(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c 32 > $PASSWORD_FILE) fi fi +# BLS password file +if [[ ! -f $BLS_PASSWORD_FILE ]]; then + mkdir -p $KEYSTORE_DIR + if [[ ! -z $BLS_PASSWORD ]]; then + echo "BLS password env is set. Writing into file." 
+ echo "$BLS_PASSWORD" > $BLS_PASSWORD_FILE + unset BLS_PASSWORD + else + if [[ "$ENABLE_FAST_FINALITY_SIGN" = "true" && "$BLS_AUTO_GENERATE" = "true" ]]; then + echo "No BLS password set (or empty), generating a new one" + $(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c 32 > $BLS_PASSWORD_FILE) + fi + fi +fi + accountsCount=$( ronin account list --datadir $datadir --keystore $KEYSTORE_DIR \ 2> /dev/null \ @@ -117,11 +136,69 @@ elif [[ "$mine" = "true" ]]; then echo "Warning: A mining node is started without private key environment provided" fi -accountsCount=$( - ronin account list --datadir $datadir --keystore $KEYSTORE_DIR \ - 2> /dev/null \ - | wc -l -) +if [[ "$ENABLE_FAST_FINALITY" = "true" ]]; then + params="$params --finality.enable" +fi + +if [[ "$ENABLE_FAST_FINALITY_SIGN" = "true" ]]; then + mkdir -p $BLS_PRIVATE_KEY_DIR + blsAccountsCount=$( + ronin account listbls \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR \ + 2> /dev/null \ + | wc -l + ) + + if [[ ! -z $BLS_PRIVATE_KEY ]]; then + echo "$BLS_PRIVATE_KEY" > ./bls_private_key + if [[ $blsAccountsCount -le 0 ]]; then + echo "No BLS accounts found" + echo "Creating BLS account from BLS private key" + ronin account importbls ./bls_private_key \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR + else + set +e + ronin account checkbls ./bls_private_key \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR 2> /dev/null + exitCode=$? + if [[ $exitCode -ne 0 ]]; then + echo "An account with different public key already exists in $KEYSTORE_DIR" + echo "Please consider remove account in keystore" \ + "or unset the BLS private key environment variable" + exit 1 + fi + set -e + fi + rm ./bls_private_key + unset BLS_PRIVATE_KEY + else + if [[ $blsAccountsCount -eq 0 ]]; then + if [[ $BLS_AUTO_GENERATE = "true" ]]; then + ronin account generatebls \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR + else + echo "Error: Enable fast finality without providing BLS secret key" + exit 1 + fi + fi + fi + + blsParams="--finality.enablesign --finality.blspasswordpath $BLS_PASSWORD_FILE --finality.blswalletpath $BLS_PRIVATE_KEY_DIR" + blsAccount=$( + ronin account listbls \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR \ + 2> /dev/null \ + | head -n 1 \ + | cut -d"{" -f 2 | cut -d"}" -f 1 + ) + + echo "Using BLS account $blsAccount" +fi if [[ $accountsCount -gt 0 ]]; then account=$( @@ -217,6 +294,14 @@ echo "dump: $account $BOOTNODES" set -x +if [[ "$BLS_SHOW_PRIVATE_KEY" = "true" ]]; then + mkdir -p $BLS_PRIVATE_KEY_DIR + exec ronin account listbls \ + --finality.blspasswordpath $BLS_PASSWORD_FILE \ + --finality.blswalletpath $BLS_PRIVATE_KEY_DIR \ + --secret +fi + exec ronin $params \ --syncmode $syncmode \ --verbosity $VERBOSITY \ @@ -235,4 +320,5 @@ exec ronin $params \ --ws.origins "*" \ --allow-insecure-unlock \ --miner.gastarget "100000000" \ + $blsParams \ "$@" diff --git a/eth/api.go b/eth/api.go index d0544c1c09..904ba05a20 100644 --- a/eth/api.go +++ b/eth/api.go @@ -295,6 +295,8 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error var block *types.Block if blockNr == rpc.LatestBlockNumber { block = api.eth.blockchain.CurrentBlock() + } else if blockNr == rpc.FinalizedBlockNumber { + block = api.eth.blockchain.FinalizedBlock() } else { block = 
api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) } @@ -383,6 +385,8 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta var block *types.Block if number == rpc.LatestBlockNumber { block = api.eth.blockchain.CurrentBlock() + } else if number == rpc.FinalizedBlockNumber { + block = api.eth.blockchain.FinalizedBlock() } else { block = api.eth.blockchain.GetBlockByNumber(uint64(number)) } diff --git a/eth/api_backend.go b/eth/api_backend.go index d936b69bee..246e3c0a55 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -19,10 +19,11 @@ package eth import ( "context" "errors" - "github.com/ethereum/go-ethereum/eth/tracers" "math/big" "time" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -73,6 +74,10 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb if number == rpc.LatestBlockNumber { return b.eth.blockchain.CurrentBlock().Header(), nil } + if number == rpc.FinalizedBlockNumber { + return b.eth.blockchain.FinalizedBlock().Header(), nil + } + return b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil } @@ -107,6 +112,9 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe if number == rpc.LatestBlockNumber { return b.eth.blockchain.CurrentBlock(), nil } + if number == rpc.FinalizedBlockNumber { + return b.eth.blockchain.FinalizedBlock(), nil + } return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil } diff --git a/eth/backend.go b/eth/backend.go index 46c3672cb4..53788316ee 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/consortium" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vote" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -45,6 +46,7 @@ import ( "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -223,16 +225,46 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if checkpoint == nil { checkpoint = params.TrustedCheckpoints[genesisHash] } + var votePool *vote.VotePool + nodeConfig := stack.Config() + if nodeConfig.EnableFastFinality { + if config.DisableRoninProtocol { + return nil, errors.New("fast finality requires ronin protocol to be enabled") + } + + finalityEngine, ok := eth.engine.(consensus.FastFinalityPoSA) + if !ok { + return nil, errors.New("consensus engine does not support fast finality") + } + votePool = vote.NewVotePool(eth.blockchain, finalityEngine, nodeConfig.MaxCurVoteAmountPerBlock) + + if _, err := vote.NewVoteManager( + eth, + chainDb, + chainConfig, + eth.blockchain, + votePool, + nodeConfig.EnableFastFinalitySign, + nodeConfig.BlsPasswordPath, + nodeConfig.BlsWalletPath, + finalityEngine, + nil, + ); err != nil { + return nil, err + } + } if eth.handler, err = newHandler(&handlerConfig{ - Database: chainDb, - Chain: eth.blockchain, - TxPool: eth.txPool, - Network: config.NetworkId, - Sync: config.SyncMode, - BloomCache: uint64(cacheLimit), - EventMux: eth.eventMux, - Checkpoint: checkpoint, - Whitelist: config.Whitelist, + Database: chainDb, + Chain: eth.blockchain, + 
TxPool: eth.txPool, + Network: config.NetworkId, + Sync: config.SyncMode, + BloomCache: uint64(cacheLimit), + EventMux: eth.eventMux, + Checkpoint: checkpoint, + Whitelist: config.Whitelist, + DisableRoninProtocol: config.DisableRoninProtocol, + VotePool: votePool, }); err != nil { return nil, err } @@ -257,6 +289,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } return state.GetFenixValidators(stateDb, eth.blockchain.Config().FenixValidatorContractAddress), nil }) + if votePool != nil { + c.SetVotePool(votePool) + } } // The first thing the node will do is reconstruct the verification data for // the head block (ethash cache or clique voting snapshot). Might as well do @@ -567,6 +602,11 @@ func (s *Ethereum) Protocols() []p2p.Protocol { if s.config.SnapshotCache > 0 { protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...) } + + if !s.config.DisableRoninProtocol { + protos = append(protos, ronin.MakeProtocols((*roninHandler)(s.handler))...) + } + return protos } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 70b7c7462d..fd07d731fb 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -212,6 +212,9 @@ type Config struct { // Enable double sign monitoring EnableMonitorDoubleSign bool + + // Disable ronin p2p protocol + DisableRoninProtocol bool } // CreateConsensusEngine creates a consensus engine for the given chain configuration. diff --git a/eth/handler.go b/eth/handler.go index c470bc3a25..bdb6eeee70 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -28,9 +28,11 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vote" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/fetcher" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -76,15 +78,17 @@ type txPool interface { // handlerConfig is the collection of initialization parameters to create a full // node network handler. 
type handlerConfig struct { - Database ethdb.Database // Database for direct sync insertions - Chain *core.BlockChain // Blockchain to serve data from - TxPool txPool // Transaction pool to propagate from - Network uint64 // Network identifier to adfvertise - Sync downloader.SyncMode // Whether to fast or full sync - BloomCache uint64 // Megabytes to alloc for fast sync bloom - EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` - Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges - Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged + Database ethdb.Database // Database for direct sync insertions + Chain *core.BlockChain // Blockchain to serve data from + TxPool txPool // Transaction pool to propagate from + Network uint64 // Network identifier to adfvertise + Sync downloader.SyncMode // Whether to fast or full sync + BloomCache uint64 // Megabytes to alloc for fast sync bloom + EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` + Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges + Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged + DisableRoninProtocol bool // Ronin protocol is enabled + VotePool *vote.VotePool // Vote pool when fast finality is enabled } type handler struct { @@ -121,7 +125,14 @@ type handler struct { chainSync *chainSyncer wg sync.WaitGroup - peerWG sync.WaitGroup + + handlerStartCh chan struct{} + handlerDoneCh chan struct{} + + disableRoninProtocol bool + votePool *vote.VotePool + voteCh chan core.NewVoteEvent + voteSub event.Subscription } // newHandler returns a handler for all Ethereum chain management protocol. @@ -131,15 +142,19 @@ func newHandler(config *handlerConfig) (*handler, error) { config.EventMux = new(event.TypeMux) // Nicety initialization for tests } h := &handler{ - networkID: config.Network, - forkFilter: forkid.NewFilter(config.Chain), - eventMux: config.EventMux, - database: config.Database, - txpool: config.TxPool, - chain: config.Chain, - peers: newPeerSet(), - whitelist: config.Whitelist, - quitSync: make(chan struct{}), + networkID: config.Network, + forkFilter: forkid.NewFilter(config.Chain), + eventMux: config.EventMux, + database: config.Database, + txpool: config.TxPool, + chain: config.Chain, + peers: newPeerSet(), + whitelist: config.Whitelist, + quitSync: make(chan struct{}), + handlerDoneCh: make(chan struct{}), + handlerStartCh: make(chan struct{}), + disableRoninProtocol: config.DisableRoninProtocol, + votePool: config.VotePool, } if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis. Yet the fast @@ -231,9 +246,50 @@ func newHandler(config *handlerConfig) (*handler, error) { return h, nil } +// protoTracker tracks the number of active protocol handlers. +func (h *handler) protoTracker() { + defer h.wg.Done() + var active int + for { + select { + case <-h.handlerStartCh: + active++ + case <-h.handlerDoneCh: + active-- + case <-h.quitSync: + // Wait for all active handlers to finish. + for ; active > 0; active-- { + <-h.handlerDoneCh + } + return + } + } +} + +// incHandlers signals to increment the number of active handlers if not +// quitting. +func (h *handler) incHandlers() bool { + select { + case h.handlerStartCh <- struct{}{}: + return true + case <-h.quitSync: + return false + } +} + +// decHandlers signals to decrement the number of active handlers. 
+func (h *handler) decHandlers() { + h.handlerDoneCh <- struct{}{} +} + // runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to // various subsistems and starts handling messages. func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { + if !h.incHandlers() { + return p2p.DiscQuitting + } + defer h.decHandlers() + // If the peer has a `snap` extension, wait for it to connect so we can have // a uniform initialization/teardown mechanism snap, err := h.peers.waitSnapExtension(peer) @@ -241,12 +297,15 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Error("Snapshot extension barrier failed", "err", err) return err } - // TODO(karalabe): Not sure why this is needed - if !h.chainSync.handlePeerEvent(peer) { - return p2p.DiscQuitting + + var ronin *ronin.Peer + if !h.disableRoninProtocol { + ronin, err = h.peers.waitRoninExtension(peer) + if err != nil { + peer.Log().Error("Ronin extension barrier failed", "err", err) + return err + } } - h.peerWG.Add(1) - defer h.peerWG.Done() // Execute the Ethereum handshake var ( @@ -281,7 +340,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Debug("Ethereum peer connected", "name", peer.Name()) // Register the peer locally - if err := h.peers.registerPeer(peer, snap); err != nil { + if err := h.peers.registerPeer(peer, snap, ronin); err != nil { peer.Log().Error("Ethereum peer registration failed", "err", err) return err } @@ -302,7 +361,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { return err } } - h.chainSync.handlePeerEvent(peer) + h.chainSync.handlePeerEvent() // Propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. @@ -342,8 +401,10 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { // `eth`, all subsystem registrations and lifecycle management will be done by // the main `eth` handler to prevent strange races. func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error { - h.peerWG.Add(1) - defer h.peerWG.Done() + if !h.incHandlers() { + return p2p.DiscQuitting + } + defer h.decHandlers() if err := h.peers.registerSnapExtension(peer); err != nil { peer.Log().Info("Snapshot extension registration failed", "err", err) @@ -352,6 +413,20 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error return handler(peer) } +func (h *handler) runRoninExtension(peer *ronin.Peer, handler ronin.Handler) error { + if !h.incHandlers() { + return p2p.DiscQuitting + } + defer h.decHandlers() + + if err := h.peers.registerRoninExtension(peer); err != nil { + peer.Log().Info("Ronin extension registration failed", "err", err) + return err + } + + return handler(peer) +} + // removePeer requests disconnection of a peer. func (h *handler) removePeer(id string) { peer := h.peers.peer(id) @@ -408,23 +483,36 @@ func (h *handler) Start(maxPeers int) { // start sync handlers h.wg.Add(1) go h.chainSync.loop() + + // start peer handler tracker + h.wg.Add(1) + go h.protoTracker() + + if h.votePool != nil { + h.voteCh = make(chan core.NewVoteEvent) + h.voteSub = h.votePool.SubscribeNewVoteEvent(h.voteCh) + h.wg.Add(1) + go h.voteBroadcastLoop() + } } func (h *handler) Stop() { h.txsSub.Unsubscribe() // quits txBroadcastLoop h.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop + if h.voteSub != nil { + h.voteSub.Unsubscribe() // quits voteBroadcastLoop + } // Quit chainSync and txsync64. 
// After this is done, no new peers will be accepted. close(h.quitSync) - h.wg.Wait() // Disconnect existing sessions. // This also closes the gate for any new registrations on the peer set. // sessions which are already established but not added to h.peers yet // will exit when they try to register. h.peers.close() - h.peerWG.Wait() + h.wg.Wait() log.Info("Ethereum protocol stopped") } @@ -529,3 +617,22 @@ func (h *handler) txBroadcastLoop() { } } } + +func (h *handler) broadcastVote(voteEnvelop *types.VoteEnvelope) { + roninPeers := h.peers.roninPeerWithoutVote(voteEnvelop.Hash()) + for _, peer := range roninPeers { + peer.AsyncSendNewVote(voteEnvelop) + } +} + +func (h *handler) voteBroadcastLoop() { + defer h.wg.Done() + for { + select { + case voteEvent := <-h.voteCh: + h.broadcastVote(voteEvent.Vote) + case <-h.voteSub.Err(): + return + } + } +} diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3ff9f2245b..fb5643e89a 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -212,7 +212,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td // Update the peer's total difficulty if better than the previous if _, td := peer.Head(); trueTD.Cmp(td) > 0 { peer.SetHead(trueHead, trueTD) - h.chainSync.handlePeerEvent(peer) + h.chainSync.handlePeerEvent() } return nil } diff --git a/eth/handler_ronin.go b/eth/handler_ronin.go new file mode 100644 index 0000000000..0aa8389b4b --- /dev/null +++ b/eth/handler_ronin.go @@ -0,0 +1,40 @@ +package eth + +import ( + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +type roninHandler handler + +func (r *roninHandler) RunPeer(peer *ronin.Peer, hand ronin.Handler) error { + return (*handler)(r).runRoninExtension(peer, hand) +} + +func (r *roninHandler) PeerInfo(id enode.ID) interface{} { + ethPeer := r.peers.peer(id.String()) + if ethPeer != nil && ethPeer.roninExt != nil { + return ethPeer.roninExt.Version() + } else { + return nil + } +} + +func (r *roninHandler) Handle(peer *ronin.Peer, packet ronin.Packet) error { + switch packet.Kind() { + case ronin.NewVoteMsg: + if r.votePool != nil { + votePacket := packet.(*ronin.NewVotePacket) + for _, rawVote := range votePacket.Vote { + vote := &types.VoteEnvelope{ + RawVoteEnvelope: *rawVote, + } + r.votePool.PutVote(peer.ID(), vote) + } + } else { + peer.Log().Debug("Local node does not enable fast finality, drop new vote msg") + } + } + return nil +} diff --git a/eth/handler_ronin_test.go b/eth/handler_ronin_test.go new file mode 100644 index 0000000000..9f28c99cd2 --- /dev/null +++ b/eth/handler_ronin_test.go @@ -0,0 +1,185 @@ +package eth + +import ( + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// testRoninHandler is a mock event handler to listen for inbound network requests +// on the `eth` protocol and convert them into a more easily testable form. 
+type testRoninHandler struct { + voteBroadcasts event.Feed +} + +func (h *testRoninHandler) RunPeer(*ronin.Peer, ronin.Handler) error { panic("not used in tests") } +func (h *testRoninHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } + +func (h *testRoninHandler) Handle(peer *ronin.Peer, packet ronin.Packet) error { + switch packet.Kind() { + case ronin.NewVoteMsg: + h.voteBroadcasts.Send(packet.Name()) + return nil + + default: + panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet)) + } +} + +func TestVoteBroadcast(t *testing.T) { + const peers = 10 + protocols := []p2p.Protocol{ + { + Name: eth.ProtocolName, + Version: eth.ETH66, + }, + { + Name: ronin.ProtocolName, + Version: ronin.Ronin1, + }, + } + caps := []p2p.Cap{ + { + Name: eth.ProtocolName, + Version: eth.ETH66, + }, + { + Name: ronin.ProtocolName, + Version: ronin.Ronin1, + }, + } + + // Create a source eth handler + source := newTestHandler() + defer source.close() + + sinksEth := make([]*testEthHandler, peers) + for i := 0; i < len(sinksEth); i++ { + sinksEth[i] = new(testEthHandler) + } + sinksRonin := make([]*testRoninHandler, peers) + for i := 0; i < len(sinksRonin); i++ { + sinksRonin[i] = new(testRoninHandler) + } + + // Interconnect all the sink handlers with the source handler + var ( + genesis = source.chain.Genesis() + td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) + ) + for i := range sinksEth { + sinkEth := sinksEth[i] + sinkRonin := sinksRonin[i] + + sourceEthPipe, sinkEthPipe := p2p.MsgPipe() + defer sourceEthPipe.Close() + defer sinkEthPipe.Close() + + sourceEthPeer := eth.NewPeer( + eth.ETH66, + p2p.NewPeerPipeWithProtocol(enode.ID{byte(i + 1)}, "", caps, sourceEthPipe, protocols), + sourceEthPipe, + nil, + ) + sinkEthPeer := eth.NewPeer( + eth.ETH66, + p2p.NewPeerPipeWithProtocol(enode.ID{0}, "", caps, sinkEthPipe, protocols), + sinkEthPipe, + nil, + ) + defer sourceEthPeer.Close() + defer sinkEthPeer.Close() + + sourceRoninPipe, sinkRoninPipe := p2p.MsgPipe() + defer sourceRoninPipe.Close() + defer sinkRoninPipe.Close() + + sourceRoninPeer := ronin.NewPeer( + ronin.Ronin1, + p2p.NewPeerPipeWithProtocol(enode.ID{byte(i + 1)}, "", caps, sourceRoninPipe, protocols), + sourceRoninPipe, + ) + sinkRoninPeer := ronin.NewPeer( + ronin.Ronin1, + p2p.NewPeerPipeWithProtocol(enode.ID{0}, "", caps, sinkRoninPipe, protocols), + sinkRoninPipe, + ) + defer sourceRoninPeer.Close() + defer sinkRoninPeer.Close() + + go source.handler.runRoninExtension(sourceRoninPeer, func(peer *ronin.Peer) error { + return ronin.Handle((*roninHandler)(source.handler), peer) + }) + go ronin.Handle(sinkRonin, sinkRoninPeer) + + go source.handler.runEthPeer(sourceEthPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + + if err := sinkEthPeer.Handshake( + 1, + td, + genesis.Hash(), + genesis.Hash(), + forkid.NewIDWithChain(source.chain), + forkid.NewFilter(source.chain), + ); err != nil { + t.Fatalf("failed to run protocol handshake, err %s", err) + } + + go eth.Handle(sinkEth, sinkEthPeer) + } + + // Subscribe to all the vote sinks + voteChs := make([]chan string, len(sinksRonin)) + for i := 0; i < len(sinksRonin); i++ { + voteChs[i] = make(chan string, 1) + defer close(voteChs[i]) + + sub := sinksRonin[i].voteBroadcasts.Subscribe(voteChs[i]) + defer sub.Unsubscribe() + } + + // Initiate a vote propagation across the peers + time.Sleep(100 * time.Millisecond) + source.handler.broadcastVote(&types.VoteEnvelope{ + RawVoteEnvelope: 
types.RawVoteEnvelope{ + Data: &types.VoteData{ + TargetNumber: 0, + TargetHash: common.Hash{}, + }, + }, + }) + + // Iterate through all the sinks and ensure the correct number of the votes + done := make(chan struct{}, peers) + for _, ch := range voteChs { + ch := ch + go func() { + <-ch + done <- struct{}{} + }() + } + var received int + for { + select { + case <-done: + received++ + + case <-time.After(200 * time.Millisecond): + if received != peers { + t.Errorf("broadcast count mismatch: have %d, want %d", received, peers) + } + return + } + } +} diff --git a/eth/peer.go b/eth/peer.go index 1cea9c640e..c3d03b33da 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -22,6 +22,7 @@ import ( "time" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" "github.com/ethereum/go-ethereum/eth/protocols/snap" ) @@ -36,7 +37,8 @@ type ethPeerInfo struct { // ethPeer is a wrapper around eth.Peer to maintain a few extra metadata. type ethPeer struct { *eth.Peer - snapExt *snapPeer // Satellite `snap` connection + snapExt *snapPeer // Satellite `snap` connection + roninExt *ronin.Peer // Satellite `ronin` connection syncDrop *time.Timer // Connection dropper if `eth` sync progress isn't validated in time snapWait chan struct{} // Notification channel for snap connections diff --git a/eth/peerset.go b/eth/peerset.go index 1e864a8e46..930f60e5e7 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/ronin" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/p2p" ) @@ -43,6 +44,10 @@ var ( // errSnapWithoutEth is returned if a peer attempts to connect only on the // snap protocol without advertizing the eth main protocol. errSnapWithoutEth = errors.New("peer connected on snap without compatible eth support") + + // errRoninWithoutEth is returned if a peer attempts to connect only on the + // ronin protocol without advertizing the eth main protocol. + errRoninWithoutEth = errors.New("peer connected on ronin without compatible eth support") ) // peerSet represents the collection of active peers currently participating in @@ -54,6 +59,9 @@ type peerSet struct { snapWait map[string]chan *snap.Peer // Peers connected on `eth` waiting for their snap extension snapPend map[string]*snap.Peer // Peers connected on the `snap` protocol, but not yet on `eth` + roninWait map[string]chan *ronin.Peer // Peers connected on `eth` waiting for their ronin extension + roninPend map[string]*ronin.Peer // Peers connected on the `ronin` protocol, but not yet on `eth` + lock sync.RWMutex closed bool } @@ -61,9 +69,11 @@ type peerSet struct { // newPeerSet creates a new peer set to track the active participants. 
func newPeerSet() *peerSet { return &peerSet{ - peers: make(map[string]*ethPeer), - snapWait: make(map[string]chan *snap.Peer), - snapPend: make(map[string]*snap.Peer), + peers: make(map[string]*ethPeer), + snapWait: make(map[string]chan *snap.Peer), + snapPend: make(map[string]*snap.Peer), + roninWait: make(map[string]chan *ronin.Peer), + roninPend: make(map[string]*ronin.Peer), } } @@ -131,9 +141,66 @@ func (ps *peerSet) waitSnapExtension(peer *eth.Peer) (*snap.Peer, error) { return <-wait, nil } +// registerRoninExtension unblocks an already connected `eth` peer waiting for its +// `ronin` extension, or if no such peer exists, tracks the extension for the time +// being until the `eth` main protocol starts looking for it. +func (ps *peerSet) registerRoninExtension(peer *ronin.Peer) error { + if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { + return errRoninWithoutEth + } + + ps.lock.Lock() + defer ps.lock.Unlock() + + id := peer.ID() + if _, ok := ps.peers[id]; ok { + return errPeerAlreadyRegistered // avoid connections with the same id as existing ones + } + if _, ok := ps.roninPend[id]; ok { + return errPeerAlreadyRegistered // avoid connections with the same id as pending ones + } + if wait, ok := ps.roninWait[id]; ok { + delete(ps.roninWait, id) + wait <- peer + return nil + } + ps.roninPend[id] = peer + return nil +} + +// waitRoninExtensions blocks until `ronin` satellite protocols are connected and tracked +// by the peerset. +func (ps *peerSet) waitRoninExtension(peer *eth.Peer) (*ronin.Peer, error) { + // If the peer does not support a compatible `ronin`, don't wait + if !peer.RunningCap(ronin.ProtocolName, ronin.ProtocolVersions) { + return nil, nil + } + ps.lock.Lock() + + id := peer.ID() + if _, ok := ps.peers[id]; ok { + ps.lock.Unlock() + return nil, errPeerAlreadyRegistered // avoid connections with the same id as existing ones + } + if _, ok := ps.roninWait[id]; ok { + ps.lock.Unlock() + return nil, errPeerAlreadyRegistered // avoid connections with the same id as pending ones + } + if peer, ok := ps.roninPend[id]; ok { + delete(ps.roninPend, id) + ps.lock.Unlock() + return peer, nil + } + wait := make(chan *ronin.Peer) + ps.roninWait[id] = wait + ps.lock.Unlock() + + return <-wait, nil +} + // registerPeer injects a new `eth` peer into the working set, or returns an error // if the peer is already known. -func (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer) error { +func (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer, roninExt *ronin.Peer) error { // Start tracking the new peer ps.lock.Lock() defer ps.lock.Unlock() @@ -152,6 +219,9 @@ func (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer) error { eth.snapExt = &snapPeer{ext} ps.snapPeers++ } + if roninExt != nil { + eth.roninExt = roninExt + } ps.peers[id] = eth return nil } @@ -247,6 +317,20 @@ func (ps *peerSet) peerWithHighestTD() *eth.Peer { return bestPeer } +func (ps *peerSet) roninPeerWithoutVote(hash common.Hash) []*ronin.Peer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + var roninPeers []*ronin.Peer + for _, peer := range ps.peers { + if peer.roninExt != nil && !peer.roninExt.KnownFinalityVote(hash) { + roninPeers = append(roninPeers, peer.roninExt) + } + } + + return roninPeers +} + // close disconnects all peers. 
func (ps *peerSet) close() { ps.lock.Lock() diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 1b4cfeb3da..baec03a10b 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -21,9 +21,9 @@ import ( "math/rand" "sync" - mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/rlp" ) @@ -56,14 +56,6 @@ const ( maxQueuedBlockAnns = 4 ) -// max is a helper function which returns the larger of the two given integers. -func max(a, b int) int { - if a > b { - return a - } - return b -} - // Peer is a collection of relevant information we have about a `eth` peer. type Peer struct { id string // Unique ID for the peer, cached @@ -75,14 +67,14 @@ type Peer struct { head common.Hash // Latest advertised head block hash td *big.Int // Latest advertised head block total difficulty - knownBlocks *knownCache // Set of block hashes known to be known by this peer + knownBlocks *protocols.KnownCache // Set of block hashes known to be known by this peer queuedBlocks chan *blockPropagation // Queue of blocks to broadcast to the peer queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer - txpool TxPool // Transaction pool used by the broadcasters for liveness checks - knownTxs *knownCache // Set of transaction hashes known to be known by this peer - txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests - txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests + txpool TxPool // Transaction pool used by the broadcasters for liveness checks + knownTxs *protocols.KnownCache // Set of transaction hashes known to be known by this peer + txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests + txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests term chan struct{} // Termination channel to stop the broadcasters lock sync.RWMutex // Mutex protecting the internal fields @@ -96,8 +88,8 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe Peer: p, rw: rw, version: version, - knownTxs: newKnownCache(maxKnownTxs), - knownBlocks: newKnownCache(maxKnownBlocks), + knownTxs: protocols.NewKnownCache(maxKnownTxs), + knownBlocks: protocols.NewKnownCache(maxKnownBlocks), queuedBlocks: make(chan *blockPropagation, maxQueuedBlocks), queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns), txBroadcast: make(chan []common.Hash), @@ -424,37 +416,3 @@ func (p *Peer) RequestTxs(hashes []common.Hash) error { GetPooledTransactionsPacket: hashes, }) } - -// knownCache is a cache for known hashes. -type knownCache struct { - hashes mapset.Set - max int -} - -// newKnownCache creates a new knownCache with a max capacity. -func newKnownCache(max int) *knownCache { - return &knownCache{ - max: max, - hashes: mapset.NewSet(), - } -} - -// Add adds a list of elements to the set. -func (k *knownCache) Add(hashes ...common.Hash) { - for k.hashes.Cardinality() > max(0, k.max-len(hashes)) { - k.hashes.Pop() - } - for _, hash := range hashes { - k.hashes.Add(hash) - } -} - -// Contains returns whether the given item is in the set. -func (k *knownCache) Contains(hash common.Hash) bool { - return k.hashes.Contains(hash) -} - -// Cardinality returns the number of elements in the set. 
-func (k *knownCache) Cardinality() int { - return k.hashes.Cardinality() -} diff --git a/eth/protocols/eth/peer_test.go b/eth/protocols/eth/peer_test.go index fc93443708..c22da3e958 100644 --- a/eth/protocols/eth/peer_test.go +++ b/eth/protocols/eth/peer_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" ) @@ -64,7 +65,7 @@ func (p *testPeer) close() { func TestPeerSet(t *testing.T) { size := 5 - s := newKnownCache(size) + s := protocols.NewKnownCache(size) // add 10 items for i := 0; i < size*2; i++ { diff --git a/eth/protocols/known_cache.go b/eth/protocols/known_cache.go new file mode 100644 index 0000000000..eac9292cad --- /dev/null +++ b/eth/protocols/known_cache.go @@ -0,0 +1,48 @@ +package protocols + +import ( + mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/common" +) + +// max is a helper function which returns the larger of the two given integers. +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// KnownCache is a cache for known hashes. +type KnownCache struct { + hashes mapset.Set + max int +} + +// NewKnownCache creates a new knownCache with a max capacity. +func NewKnownCache(max int) *KnownCache { + return &KnownCache{ + max: max, + hashes: mapset.NewSet(), + } +} + +// Add adds a list of elements to the set. +func (k *KnownCache) Add(hashes ...common.Hash) { + for k.hashes.Cardinality() > max(0, k.max-len(hashes)) { + k.hashes.Pop() + } + for _, hash := range hashes { + k.hashes.Add(hash) + } +} + +// Contains returns whether the given item is in the set. +func (k *KnownCache) Contains(hash common.Hash) bool { + return k.hashes.Contains(hash) +} + +// Cardinality returns the number of elements in the set. +func (k *KnownCache) Cardinality() int { + return k.hashes.Cardinality() +} diff --git a/eth/protocols/ronin/handler.go b/eth/protocols/ronin/handler.go new file mode 100644 index 0000000000..0926d78667 --- /dev/null +++ b/eth/protocols/ronin/handler.go @@ -0,0 +1,127 @@ +package ronin + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// Handler is a callback to invoke from an outside runner after the boilerplate +// exchanges have passed. +type Handler func(peer *Peer) error + +type Backend interface { + // RunPeer is invoked when a peer joins on the `ronin` protocol. The handler + // should do any peer maintenance work, handshakes and validations. If all + // is passed, control should be given back to the `handler` to process the + // inbound messages going forward. + RunPeer(peer *Peer, handler Handler) error + + // PeerInfo retrieves all known `ronin` information about a peer. + PeerInfo(id enode.ID) interface{} + + // Handle is a callback to be invoked when a data packet is received from + // the remote peer. Only packets not consumed by the protocol handler will + // be forwarded to the backend. 
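+	//
+	// A minimal sketch of a backend implementation (illustrative only: the
+	// ethHandler receiver and its votePool.PutVote helper are assumptions,
+	// not part of this change):
+	//
+	//	func (h *ethHandler) Handle(peer *ronin.Peer, packet ronin.Packet) error {
+	//		switch packet := packet.(type) {
+	//		case *ronin.NewVotePacket:
+	//			for _, raw := range packet.Vote {
+	//				h.votePool.PutVote(&types.VoteEnvelope{RawVoteEnvelope: *raw})
+	//			}
+	//			return nil
+	//		default:
+	//			return fmt.Errorf("unexpected ronin packet type: %T", packet)
+	//		}
+	//	}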
+	Handle(peer *Peer, packet Packet) error
+}
+
+func MakeProtocols(backend Backend) []p2p.Protocol {
+	protocol := make([]p2p.Protocol, len(ProtocolVersions))
+	for i, version := range ProtocolVersions {
+		protocol[i] = p2p.Protocol{
+			Name:    ProtocolName,
+			Version: version,
+			Length:  protocolLengths[version],
+			Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
+				roninPeer := NewPeer(version, peer, rw)
+				defer roninPeer.Close()
+
+				return backend.RunPeer(roninPeer, func(peer *Peer) error {
+					return Handle(backend, peer)
+				})
+			},
+			NodeInfo: func() interface{} {
+				return nodeInfo()
+			},
+			PeerInfo: func(id enode.ID) interface{} {
+				return backend.PeerInfo(id)
+			},
+		}
+	}
+
+	return protocol
+}
+
+// Handle is the callback invoked to manage the life cycle of a `ronin` peer.
+// When this function terminates, the peer is disconnected.
+func Handle(backend Backend, peer *Peer) error {
+	for {
+		err := handleMessage(backend, peer)
+		if err != nil {
+			peer.Log().Debug("Failed to handle `ronin` message", "err", err)
+			return err
+		}
+	}
+}
+
+// handleMessage is invoked whenever an inbound message is received from a
+// remote peer on the `ronin` protocol. The remote connection is torn down upon
+// returning any error.
+func handleMessage(backend Backend, peer *Peer) error {
+	msg, err := peer.rw.ReadMsg()
+	if err != nil {
+		return err
+	}
+
+	if msg.Size > maxMessageSize {
+		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
+	}
+
+	defer msg.Discard()
+	start := time.Now()
+	// Track the amount of time it takes to serve the request and run the handler
+	if metrics.Enabled {
+		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
+		defer func(start time.Time) {
+			sampler := func() metrics.Sample {
+				return metrics.ResettingSample(
+					metrics.NewExpDecaySample(1028, 0.015),
+				)
+			}
+			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
+		}(start)
+	}
+
+	switch msg.Code {
+	case NewVoteMsg:
+		var votePacket NewVotePacket
+		if err := msg.Decode(&votePacket); err != nil {
+			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
+		}
+		for _, packet := range votePacket.Vote {
+			vote := types.VoteEnvelope{
+				RawVoteEnvelope: *packet,
+			}
+
+			peer.markFinalityVote(vote.Hash())
+		}
+
+		return backend.Handle(peer, &votePacket)
+	default:
+		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
+	}
+}
+
+// NodeInfo represents a short summary of the `ronin` sub-protocol metadata
+// known about the host peer.
+type NodeInfo struct{}
+
+// nodeInfo retrieves some `ronin` protocol metadata about the running host node.
+func nodeInfo() *NodeInfo {
+	return &NodeInfo{}
+}
diff --git a/eth/protocols/ronin/peer.go b/eth/protocols/ronin/peer.go
new file mode 100644
index 0000000000..48f58f23b9
--- /dev/null
+++ b/eth/protocols/ronin/peer.go
@@ -0,0 +1,128 @@
+package ronin
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/protocols"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+const (
+	voteChannelSize = 50
+	batchInterval   = 100 * time.Millisecond
+	maxKnownVote    = 8192
+)
+
+// Peer is a collection of relevant information we have about a `ronin` peer.
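+//
+// Outbound votes are not written to the wire one by one: AsyncSendNewVote only
+// queues the vote on voteCh, and the batchVote goroutine flushes everything
+// queued every batchInterval (100ms) as a single NewVotePacket. A minimal
+// usage sketch (the rawPeer, rw and vote values below are illustrative; they
+// normally come from the devp2p stack and the vote pool):
+//
+//	p := NewPeer(Ronin1, rawPeer, rw) // starts the batching goroutine
+//	defer p.Close()                   // stops it again
+//	p.AsyncSendNewVote(vote)          // sent on the next 100ms tick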
+type Peer struct {
+	id string // Unique ID for the peer, cached
+
+	*p2p.Peer                 // The embedded P2P package peer
+	rw      p2p.MsgReadWriter // Input/output streams for ronin
+	version uint              // Protocol version negotiated
+	term    chan struct{}     // Terminate the batch vote loop
+	voteCh  chan *types.VoteEnvelope // Put vote into pool for batching
+
+	logger log.Logger // Contextual logger with the peer id injected
+
+	knownFinalityVote *protocols.KnownCache // Set of finality vote hashes known to be known by this peer
+}
+
+// NewPeer creates a wrapper for a network connection and negotiated protocol
+// version.
+func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
+	id := p.ID().String()
+	peer := &Peer{
+		id:                id,
+		Peer:              p,
+		rw:                rw,
+		version:           version,
+		voteCh:            make(chan *types.VoteEnvelope, voteChannelSize),
+		term:              make(chan struct{}),
+		logger:            log.New("peer", id[:8]),
+		knownFinalityVote: protocols.NewKnownCache(maxKnownVote),
+	}
+	go peer.batchVote()
+
+	return peer
+}
+
+// Close terminates the vote batch goroutine.
+func (p *Peer) Close() {
+	close(p.term)
+}
+
+// ID retrieves the peer's unique identifier.
+func (p *Peer) ID() string {
+	return p.id
+}
+
+// Version retrieves the peer's negotiated `ronin` protocol version.
+func (p *Peer) Version() uint {
+	return p.version
+}
+
+// Log overrides the P2P logger with the higher level one containing only the id.
+func (p *Peer) Log() log.Logger {
+	return p.logger
+}
+
+// sendNewVote sends votes to the peer.
+func (p *Peer) sendNewVote(votes []*types.VoteEnvelope) error {
+	var rawVote []*types.RawVoteEnvelope
+	for _, vote := range votes {
+		rawVote = append(rawVote, vote.Raw())
+	}
+	return p2p.Send(p.rw, NewVoteMsg, NewVotePacket{
+		Vote: rawVote,
+	})
+}
+
+// AsyncSendNewVote puts the vote into the batch vote goroutine.
+func (p *Peer) AsyncSendNewVote(vote *types.VoteEnvelope) {
+	select {
+	case p.voteCh <- vote:
+		p.markFinalityVote(vote.Hash())
+	default:
+		p.Log().Debug("Dropping vote announcement", "hash", vote.Hash())
+	}
+}
+
+// batchVote batches multiple votes and sends them to the peer.
+func (p *Peer) batchVote() {
+	var pendingVote []*types.VoteEnvelope
+	ticker := time.NewTicker(batchInterval)
+
+	for {
+		select {
+		case vote := <-p.voteCh:
+			pendingVote = append(pendingVote, vote)
+		case <-ticker.C:
+			if len(pendingVote) > 0 {
+				if err := p.sendNewVote(pendingVote); err != nil {
+					p.Log().Debug("Failed to send vote", "err", err)
+					return
+				}
+				pendingVote = nil
+			}
+		case <-p.term:
+			ticker.Stop()
+			return
+		}
+	}
+}
+
+// KnownFinalityVote returns whether peer is known to already have a vote.
+func (p *Peer) KnownFinalityVote(hash common.Hash) bool {
+	return p.knownFinalityVote.Contains(hash)
+}
+
+// markFinalityVote marks a vote as known for the peer, ensuring that it
+// will never be propagated to this particular peer.
+func (p *Peer) markFinalityVote(hash common.Hash) {
+	// If we reached the memory allowance, drop a previously known vote hash
+	p.knownFinalityVote.Add(hash)
+}
diff --git a/eth/protocols/ronin/protocol.go b/eth/protocols/ronin/protocol.go
new file mode 100644
index 0000000000..5b9a567bf0
--- /dev/null
+++ b/eth/protocols/ronin/protocol.go
@@ -0,0 +1,49 @@
+package ronin
+
+import (
+	"errors"
+
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// Constants to match up protocol versions and messages
+const (
+	Ronin1 = 1
+)
+
+// ProtocolName is the official short name of the `ronin` protocol used during
+// devp2p capability negotiation.
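+// Together with ProtocolVersions below it forms the ("ronin", 1) capability
+// advertised during the devp2p handshake. Peers that do not announce the
+// capability are still accepted: waitRoninExtension in the `eth` peer set
+// simply returns a nil extension for them, so they only take part in the
+// regular `eth`/`snap` exchanges.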
+const ProtocolName = "ronin" + +// ProtocolVersions are the supported versions of the `ronin` protocol +var ProtocolVersions = []uint{Ronin1} + +// protocolLengths are the number of implemented message corresponding to +// different protocol versions. +var protocolLengths = map[uint]uint64{Ronin1: 1} + +// maxMessageSize is the maximum cap on the size of a protocol message. +const maxMessageSize = 10 * 1024 * 1024 + +const ( + NewVoteMsg = 0x00 +) + +var ( + errMsgTooLarge = errors.New("message too long") + errDecode = errors.New("invalid message") + errInvalidMsgCode = errors.New("invalid message code") +) + +// Packet represents a p2p message in the `ronin` protocol. +type Packet interface { + Name() string // Name returns a string corresponding to the message type. + Kind() byte // Kind returns the message type. +} + +type NewVotePacket struct { + Vote []*types.RawVoteEnvelope +} + +func (*NewVotePacket) Name() string { return "NewVote" } +func (*NewVotePacket) Kind() byte { return NewVoteMsg } diff --git a/eth/sync.go b/eth/sync.go index aaac6bef90..d9280ac79d 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -88,7 +88,7 @@ func newChainSyncer(handler *handler) *chainSyncer { // handlePeerEvent notifies the syncer about a change in the peer set. // This is called for new peers and every time a peer announces a new // chain head. -func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool { +func (cs *chainSyncer) handlePeerEvent() bool { select { case cs.peerEventCh <- struct{}{}: return true diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index 0271c4c4c7..a61754352a 100644 --- a/eth/tracers/js/tracer_test.go +++ b/eth/tracers/js/tracer_test.go @@ -107,15 +107,15 @@ func TestTracer(t *testing.T) { { // tests that we don't panic on bad arguments to memory access code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(15)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(13)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to stack peeks code: "{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(11)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to memory getUint code: "{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(11)) in server-side tracer function 'step'", }, { // tests some general counting code: "{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", want: `3`, @@ -150,7 +150,7 @@ func TestTracer(t *testing.T) { }, { code: "{res: [], step: function(log) { if (log.op.toString() === 'STOP') { 
this.res.push(log.memory.slice(5, 1025 * 1024)) } }, fault: function() {}, result: function() { return this.res }}", want: "", - fail: "reached limit for padding memory slice: 1049568 at step (:1:83(23)) in server-side tracer function 'step'", + fail: "reached limit for padding memory slice: 1049568 at step (:1:83(20)) in server-side tracer function 'step'", contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)}, }, } { diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index f8f7e21e6b..c90307e70a 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -537,6 +537,9 @@ func toBlockNumArg(number *big.Int) string { if number.Cmp(big.NewInt(int64(rpc.PendingBlockNumber))) == 0 { return "pending" } + if number.Cmp(big.NewInt(int64(rpc.FinalizedBlockNumber))) == 0 { + return "finalized" + } return hexutil.EncodeBig(number) } diff --git a/genesis/testnet.json b/genesis/testnet.json index ff1f77e573..cb0455e6c5 100644 --- a/genesis/testnet.json +++ b/genesis/testnet.json @@ -22,11 +22,14 @@ "consortiumV2Contracts": { "roninValidatorSet": "0x54B3AC74a90E64E8dDE60671b6fE8F8DDf18eC9d", "slashIndicator": "0xF7837778b6E180Df6696C8Fa986d62f8b6186752", - "stakingContract": "0x9C245671791834daf3885533D24dce516B763B28" + "stakingContract": "0x9C245671791834daf3885533D24dce516B763B28", + "profileContract": "0x3b67c8D22a91572a6AB18acC9F70787Af04A4043", + "finalityTracking": "0x41aCDFe786171824a037f2Cd6224c5916A58969a" }, "puffyBlock": 12254000, "bubaBlock": 14260600, - "olekBlock": 16849000 + "olekBlock": 16849000, + "shillinBlock": 20268000 }, "alloc": { "0x0000000000000000000000000000000000000011": { diff --git a/go.mod b/go.mod index 8cc7562ad4..39bd15803b 100644 --- a/go.mod +++ b/go.mod @@ -6,75 +6,78 @@ require ( github.com/Azure/azure-pipeline-go v0.2.2 // indirect github.com/Azure/azure-storage-blob-go v0.7.0 github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect - github.com/StackExchange/wmi v1.2.1 // indirect - github.com/VictoriaMetrics/fastcache v1.6.0 + github.com/VictoriaMetrics/fastcache v1.12.0 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 github.com/btcsuite/btcd v0.20.1-beta - github.com/cespare/cp v0.1.0 + github.com/cespare/cp v1.1.1 github.com/cloudflare/cloudflare-go v0.14.0 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf - github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 - github.com/edsrzf/mmap-go v1.0.0 - github.com/fatih/color v1.7.0 + github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 + github.com/edsrzf/mmap-go v1.1.0 + github.com/fatih/color v1.9.0 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 - github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/go-stack/stack v1.8.0 - github.com/golang/protobuf v1.5.2 + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 + github.com/go-stack/stack v1.8.1 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 - github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa - github.com/google/uuid v1.2.0 - github.com/gorilla/websocket v1.4.2 + github.com/google/gofuzz v1.2.0 + github.com/google/uuid v1.3.0 + github.com/gorilla/websocket v1.5.0 
github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.2.0 - github.com/huin/goupnp v1.0.2 + github.com/holiman/uint256 v1.2.1 + github.com/huin/goupnp v1.1.0 github.com/influxdata/influxdb v1.8.3 github.com/influxdata/influxdb-client-go/v2 v2.8.0 github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect - github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 + github.com/jackpal/go-nat-pmp v1.0.2 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e github.com/julienschmidt/httprouter v1.3.0 - github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 + github.com/karalabe/usb v0.0.2 github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.11 - github.com/mattn/go-isatty v0.0.14 + github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-isatty v0.0.18 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 - github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 + github.com/peterh/liner v1.2.0 github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible - github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 - github.com/stretchr/testify v1.7.0 - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.5.0 - golang.org/x/text v0.7.0 - golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - google.golang.org/protobuf v1.27.1 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible + github.com/status-im/keycard-go v0.2.0 + github.com/stretchr/testify v1.8.2 + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 + golang.org/x/crypto v0.7.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.7.0 + golang.org/x/text v0.9.0 + golang.org/x/time v0.0.0-20220922220347-f3bd1da661af + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce - gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/urfave/cli.v1 v1.20.0 gotest.tools v2.2.0+incompatible // indirect ) require ( github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be - github.com/klauspost/compress v1.15.1 - github.com/pyroscope-io/client v0.7.0 + github.com/golang/mock v1.6.0 + github.com/herumi/bls-eth-go-binary v1.31.0 + github.com/klauspost/compress v1.16.4 + github.com/pkg/errors v0.9.1 + github.com/pyroscope-io/client v0.7.2 + github.com/supranational/blst v0.3.11 + github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.3.1 ) require ( @@ -83,26 +86,27 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect - 
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/go-ole/go-ole v1.2.5 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/google/go-cmp v0.5.6 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.18.1 // indirect - github.com/opentracing/opentracing-go v1.1.0 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pyroscope-io/godeltaprof v0.1.0 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/term v0.5.0 // indirect + github.com/pyroscope-io/godeltaprof v0.1.2 // indirect + github.com/rivo/uniseg v0.4.3 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/term v0.7.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index afbbfcf84b..796cecc929 100644 --- a/go.sum +++ b/go.sum @@ -45,10 +45,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= -github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE= +github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= @@ -107,15 +105,19 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp 
v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= -github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= +github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= @@ -147,35 +149,42 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs 
v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= @@ -189,18 +198,20 @@ github.com/go-kit/kit 
v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -217,6 +228,8 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -227,10 +240,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= @@ -244,20 +257,23 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= -github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -266,8 +282,8 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -297,17 +313,19 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w= +github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.1 h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o= +github.com/holiman/uint256 v1.2.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= -github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= +github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= @@ -323,8 +341,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod 
h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -345,23 +363,26 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY= -github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= +github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -381,10 +402,11 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -392,17 +414,21 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 
h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -410,8 +436,9 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -443,24 +470,24 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod 
h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= @@ -471,8 +498,8 @@ github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChl github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/peterh/liner v1.2.0 h1:w/UPXyl5GfahFxcTOz2j9wCIHNI+pUPr2laqpojKNCg= +github.com/peterh/liner v1.2.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -505,16 +532,21 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pyroscope-io/client v0.7.0 h1:LWuuqPQ1oa6x7BnmUOuo/aGwdX85QGhWZUBYWWW3zdk= -github.com/pyroscope-io/client v0.7.0/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= -github.com/pyroscope-io/godeltaprof v0.1.0 h1:UBqtjt0yZi4jTxqZmLAs34XG6ycS3vUTlhEUSq4NHLE= -github.com/pyroscope-io/godeltaprof v0.1.0/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= +github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -524,8 +556,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= -github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -538,41 +570,56 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= -github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/urfave/cli v1.20.0/go.mod 
h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/wealdtech/go-eth2-types/v2 v2.8.1 h1:y2N3xSIZ3tVqsnvj4AgPkh48U5sM612vhZwlK3k+3lM= +github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.3.1 h1:NlWiq9cUd69xFvhAdCRpz7CwfDjMuz8cEvPQ9yponT4= +github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.3.1/go.mod h1:luy/Y/I3gC3JxT0mQBKqysvzRN1DiFnwqUij8Yc2SP4= +github.com/wealdtech/go-eth2-wallet-types/v2 v2.10.1 h1:RRJhZ9M3S2Vh5k1SLwQmyA4NZ7E1HM4QnnHhiUySFdk= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -602,8 +649,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -654,17 +701,18 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -676,12 +724,11 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -714,30 +761,33 @@ golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -746,15 +796,17 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -788,12 +840,13 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -847,8 +900,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -860,8 +913,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -877,8 +928,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/monitor/finality_vote.go b/monitor/finality_vote.go
new file mode 100644
index 0000000000..6106027f61
--- /dev/null
+++ b/monitor/finality_vote.go
@@ -0,0 +1,159 @@
+package monitor
+
+import (
+    "errors"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/consensus"
+    "github.com/ethereum/go-ethereum/consensus/consortium/v2/finality"
+    "github.com/ethereum/go-ethereum/core/types"
+    blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common"
+    "github.com/ethereum/go-ethereum/log"
+    lru "github.com/hashicorp/golang-lru"
+)
+
+const finalityVoteCache = 100
+
+type blockInformation struct {
+    blockHash      common.Hash
+    voterPublicKey []blsCommon.PublicKey
+    voterAddress   []common.Address
+}
+
+type FinalityVoteMonitor struct {
+    chain         consensus.ChainHeaderReader
+    engine        consensus.FastFinalityPoSA
+    observedVotes *lru.Cache
+}
+
+func NewFinalityVoteMonitor(
+    chain consensus.ChainHeaderReader,
+    engine consensus.FastFinalityPoSA,
+) (*FinalityVoteMonitor, error) {
+    observedVotes, err := lru.New(finalityVoteCache)
+    if err != nil {
+        return nil, err
+    }
+
+    return &FinalityVoteMonitor{
+        chain:         chain,
+        engine:        engine,
+        observedVotes: observedVotes,
+    }, nil
+}
+
+func prettyPrintPublicKey(publicKey []blsCommon.PublicKey) string {
+    result := "[ "
+    for _, key := range publicKey {
+        result += common.Bytes2Hex(key.Marshal()) + ", "
+    }
+
+    return result + " ]"
+}
+
+func prettyPrintAddress(addresses []common.Address) string {
+    result := "[ "
+    for _, address := range addresses {
+        result += address.String() + ", "
+    }
+
+    return result + " ]"
+}
+
+func (monitor *FinalityVoteMonitor) CheckFinalityVote(block *types.Block) error {
+    extraData, err := finality.DecodeExtra(block.Extra(), true)
+    // This should not happen because the block has been verified
+    if err != nil {
+        log.Error("Unexpected error when decode extradata", "err", err)
+        return err
+    }
+
+    if extraData.HasFinalityVote == 1 {
+        blockValidator := monitor.engine.GetActiveValidatorAt(
+            monitor.chain,
+            block.NumberU64()-1,
+            block.ParentHash(),
+        )
+
+        var (
+            voterPublicKey []blsCommon.PublicKey
+            voterAddress   []common.Address
+        )
+
+        position := extraData.FinalityVotedValidators.Indices()
+        for _, pos := range position {
+            voterPublicKey = append(voterPublicKey, blockValidator[pos].BlsPublicKey)
+            voterAddress = append(voterAddress, blockValidator[pos].Address)
+        }
+
+        return monitor.checkSameHeightVote(
+            block.NumberU64(),
+            block.Hash(),
+            voterPublicKey,
+            voterAddress,
+        )
+    }
+
+    return nil
+}
+
+func (monitor *FinalityVoteMonitor) checkSameHeightVote(
+    blockNumber uint64,
+    blockHash common.Hash,
+    voterPublicKey []blsCommon.PublicKey,
+    voterAddress []common.Address,
+) error {
+    rawBlockInfo, ok := monitor.observedVotes.Get(blockNumber)
+    if !ok {
+        monitor.observedVotes.Add(blockNumber, []blockInformation{
+            {
+                blockHash:      blockHash,
+                voterPublicKey: voterPublicKey,
+                voterAddress:   voterAddress,
+            },
+        })
+        return nil
+    }
+
+    violated := false
+    blockInfo := rawBlockInfo.([]blockInformation)
+
+    for _, block := range blockInfo {
+        // 2 blocks are the same, it's not likely to happen
+        if block.blockHash == blockHash {
+            continue
+        }
+
+        for _, cachePublicKey := range block.voterPublicKey {
+            for _, blockPublicKey := range voterPublicKey {
+                if blockPublicKey.Equals(cachePublicKey) {
+                    log.Error(
+                        "Fast finality rule is violated",
"voter public key", common.Bytes2Hex(blockPublicKey.Marshal()), + "block number", blockNumber, + "block 1 hash", block.blockHash, + "block 1 voter public key", prettyPrintPublicKey(block.voterPublicKey), + "block 1 voter address", prettyPrintAddress(block.voterAddress), + "block 2 hash", blockHash, + "block 2 voter public key", prettyPrintPublicKey(voterPublicKey), + "block 2 voter address", prettyPrintAddress(voterAddress), + ) + violated = true + } + } + } + } + + blockInfo = append(blockInfo, blockInformation{ + blockHash: blockHash, + voterPublicKey: voterPublicKey, + voterAddress: voterAddress, + }) + + monitor.observedVotes.Add(blockNumber, blockInfo) + + if violated { + return errors.New("finality rule violated") + } + return nil +} diff --git a/monitor/finality_vote_test.go b/monitor/finality_vote_test.go new file mode 100644 index 0000000000..677a36dfb7 --- /dev/null +++ b/monitor/finality_vote_test.go @@ -0,0 +1,46 @@ +package monitor + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/bls/blst" + blsCommon "github.com/ethereum/go-ethereum/crypto/bls/common" +) + +func TestCheckSameHeightVote(t *testing.T) { + monitor, err := NewFinalityVoteMonitor(nil, nil) + if err != nil { + t.Fatalf("Failed to create finality vote monitor, err %s", err) + } + + key1, err := blst.RandKey() + if err != nil { + t.Fatalf("Failed to create bls key, err %s", err) + } + address1 := common.Address{0x1} + + key2, err := blst.RandKey() + if err != nil { + t.Fatalf("Failed to create bls key, err %s", err) + } + address2 := common.Address{0x2} + + voterPublicKey := []blsCommon.PublicKey{key1.PublicKey()} + voterAddress := []common.Address{address1} + if monitor.checkSameHeightVote(0, common.Hash{0x1}, voterPublicKey, voterAddress) != nil { + t.Fatalf("Expect no error when checkSameHeightVote") + } + + voterPublicKey = []blsCommon.PublicKey{key2.PublicKey()} + voterAddress = []common.Address{address2} + if monitor.checkSameHeightVote(0, common.Hash{0x2}, voterPublicKey, voterAddress) != nil { + t.Fatalf("Expect no error when checkSameHeightVote") + } + + voterPublicKey = []blsCommon.PublicKey{key2.PublicKey()} + voterAddress = []common.Address{address2} + if monitor.checkSameHeightVote(0, common.Hash{0x3}, voterPublicKey, voterAddress) == nil { + t.Fatalf("Expect error when checkSameHeightVote") + } +} diff --git a/node/config.go b/node/config.go index 4c1325f777..5e860ddb06 100644 --- a/node/config.go +++ b/node/config.go @@ -199,6 +199,14 @@ type Config struct { // AllowUnprotectedTxs allows non EIP-155 protected transactions to be send over RPC. AllowUnprotectedTxs bool `toml:",omitempty"` + + // The maximum finality vote per current block + MaxCurVoteAmountPerBlock int + EnableFastFinality bool + EnableFastFinalitySign bool + // The path of password and encrypted BLS secret key used for fast finality voting + BlsPasswordPath string + BlsWalletPath string } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/p2p/peer.go b/p2p/peer.go index 9010e9a51e..0751900588 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -129,6 +129,16 @@ func NewPeer(id enode.ID, name string, caps []Cap) *Peer { return peer } +// NewPeerWithProtocol returns a peer for testing purposes. 
+func NewPeerWithProtocol(id enode.ID, name string, caps []Cap, protocols []Protocol) *Peer { + pipe, _ := net.Pipe() + node := enode.SignNull(new(enr.Record), id) + conn := &conn{fd: pipe, transport: nil, node: node, caps: caps, name: name} + peer := newPeer(log.Root(), conn, protocols) + close(peer.closed) // ensures Disconnect doesn't block + return peer +} + // NewPeerPipe creates a peer for testing purposes. // The message pipe given as the last parameter is closed when // Disconnect is called on the peer. @@ -138,6 +148,15 @@ func NewPeerPipe(id enode.ID, name string, caps []Cap, pipe *MsgPipeRW) *Peer { return p } +// NewPeerPipeWithProtocol creates a peer for testing purposes. +// The message pipe given as the last parameter is closed when +// Disconnect is called on the peer. +func NewPeerPipeWithProtocol(id enode.ID, name string, caps []Cap, pipe *MsgPipeRW, protocols []Protocol) *Peer { + p := NewPeerWithProtocol(id, name, caps, protocols) + p.testPipe = pipe + return p +} + // ID returns the node's public key. func (p *Peer) ID() enode.ID { return p.rw.node.ID() diff --git a/params/config.go b/params/config.go index 12c45d35af..08d35149ea 100644 --- a/params/config.go +++ b/params/config.go @@ -26,6 +26,12 @@ import ( "golang.org/x/crypto/sha3" ) +const ( + BLSSignatureLength = 96 // BLSSignatureLength defines the byte length of a BLSSignature. + BLSSecretKeyLength = 32 + BLSPubkeyLength = 48 // BLSPubkeyLength defines the byte length of a BLSSignature. +) + // Genesis hashes to enforce below configs on. var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") @@ -34,6 +40,7 @@ var ( RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") RoninMainnetGenesisHash = common.HexToHash("0x6e675ee97607f4e695188786c3c1853fb1562f1c075629eb5dbcff269422a1a4") + RoninTestnetGenesisHash = common.HexToHash("0x13e47595099383189b8b0d5f3b67aa161495e478bb3fea64f4cf85cdf69cac4d") ) // TrustedCheckpoints associates each known checkpoint with the genesis hash of @@ -235,6 +242,9 @@ var ( RoninMainnetBlacklistContract = common.HexToAddress("0x313b24994c93FA0471CB4D7aB796b07467041806") RoninMainnetFenixValidatorContractAddress = common.HexToAddress("0x7f13232Bdc3a010c3f749a1c25bF99f1C053CE70") + RoninMainnetRoninValidatorSetAddress = common.HexToAddress("0x617c5d73662282EA7FfD231E020eCa6D2B0D552f") + RoninMainnetSlashIndicatorAddress = common.HexToAddress("0xEBFFF2b32fA0dF9C5C8C5d5AAa7e8b51d5207bA3") + RoninMainnetStakingContractAddress = common.HexToAddress("0x545edb750eB8769C868429BE9586F5857A768758") RoninMainnetChainConfig = &ChainConfig{ ChainID: big.NewInt(2020), @@ -251,9 +261,60 @@ var ( BlacklistContractAddress: &RoninMainnetBlacklistContract, FenixValidatorContractAddress: &RoninMainnetFenixValidatorContractAddress, Consortium: &ConsortiumConfig{ - Period: 3, - Epoch: 600, + Period: 3, + Epoch: 600, + EpochV2: 200, + }, + ConsortiumV2Contracts: &ConsortiumV2Contracts{ + RoninValidatorSet: RoninMainnetRoninValidatorSetAddress, + SlashIndicator: RoninMainnetSlashIndicatorAddress, + StakingContract: RoninMainnetStakingContractAddress, + }, + ConsortiumV2Block: big.NewInt(23155200), + PuffyBlock: big.NewInt(0), + BubaBlock: big.NewInt(0), + OlekBlock: big.NewInt(24935500), + } + + RoninTestnetBlacklistContract = common.HexToAddress("0xF53EED5210c9cF308abFe66bA7CF14884c95A8aC") + 
RoninTestnetFenixValidatorContractAddress = common.HexToAddress("0x1454cAAd1637b662432Bb795cD5773d21281eDAb") + RoninTestnetRoninValidatorSetAddress = common.HexToAddress("0x54B3AC74a90E64E8dDE60671b6fE8F8DDf18eC9d") + RoninTestnetSlashIndicatorAddress = common.HexToAddress("0xF7837778b6E180Df6696C8Fa986d62f8b6186752") + RoninTestnetStakingContractAddress = common.HexToAddress("0x9C245671791834daf3885533D24dce516B763B28") + RoninTestnetProfileContractAddress = common.HexToAddress("0x3b67c8D22a91572a6AB18acC9F70787Af04A4043") + RoninTestnetFinalityTrackingAddress = common.HexToAddress("0x41aCDFe786171824a037f2Cd6224c5916A58969a") + + RoninTestnetChainConfig = &ChainConfig{ + ChainID: big.NewInt(2021), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + OdysseusBlock: big.NewInt(3315095), + FenixBlock: big.NewInt(6770400), + BlacklistContractAddress: &RoninTestnetBlacklistContract, + FenixValidatorContractAddress: &RoninTestnetFenixValidatorContractAddress, + Consortium: &ConsortiumConfig{ + Period: 3, + Epoch: 30, + EpochV2: 200, }, + ConsortiumV2Contracts: &ConsortiumV2Contracts{ + RoninValidatorSet: RoninTestnetRoninValidatorSetAddress, + SlashIndicator: RoninTestnetSlashIndicatorAddress, + StakingContract: RoninTestnetStakingContractAddress, + ProfileContract: RoninTestnetProfileContractAddress, + FinalityTracking: RoninTestnetFinalityTrackingAddress, + }, + ConsortiumV2Block: big.NewInt(11706000), + PuffyBlock: big.NewInt(12254000), + BubaBlock: big.NewInt(14260600), + OlekBlock: big.NewInt(16849000), + ShillinBlock: big.NewInt(20268000), } // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. 
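For orientation, a minimal, hypothetical sketch of how a caller could select one of the Ronin chain configs declared above by genesis hash. Only the params identifiers (RoninMainnetGenesisHash, RoninTestnetGenesisHash, RoninMainnetChainConfig, RoninTestnetChainConfig, ShillinBlock) come from this change; the helper and the main function are illustrative, not part of the diff.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/params"
)

// roninChainConfig is a hypothetical helper: it maps a known genesis hash to
// the matching Ronin chain config and returns nil for unknown networks.
func roninChainConfig(genesis common.Hash) *params.ChainConfig {
    switch genesis {
    case params.RoninMainnetGenesisHash:
        return params.RoninMainnetChainConfig
    case params.RoninTestnetGenesisHash:
        return params.RoninTestnetChainConfig
    default:
        return nil
    }
}

func main() {
    cfg := roninChainConfig(params.RoninTestnetGenesisHash)
    // For the testnet config above this prints chain ID 2021 and the
    // Shillin (fast finality) activation block 20268000.
    fmt.Println(cfg.ChainID, cfg.ShillinBlock)
}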
@@ -464,6 +525,8 @@ type ChainConfig struct {
 	BubaBlock *big.Int `json:"bubaBlock,omitempty"` // Buba switch block (nil = no fork, 0 = already on activated)
 	// Olek hardfork reduces the delay in block time of out of turn miner
 	OlekBlock *big.Int `json:"olekBlock,omitempty"` // Olek switch block (nil = no fork, 0 = already on activated)
+	// Shillin hardfork introduces fast finality
+	ShillinBlock *big.Int `json:"shillinBlock,omitempty"` // Shillin switch block (nil = no fork, 0 = already on activated)
 	ComingForkBlock *big.Int `json:"comingForkBlock,omitempty"` // ComingForkBlock switch block (nil = no fork, 0 = already on activated)
 
 	BlacklistContractAddress *common.Address `json:"blacklistContractAddress,omitempty"` // Address of Blacklist Contract (nil = no blacklist)
@@ -515,6 +578,8 @@ type ConsortiumV2Contracts struct {
 	StakingContract   common.Address `json:"stakingContract"`
 	RoninValidatorSet common.Address `json:"roninValidatorSet"`
 	SlashIndicator    common.Address `json:"slashIndicator"`
+	ProfileContract   common.Address `json:"profileContract"`
+	FinalityTracking  common.Address `json:"finalityTracking"`
 }
 
 func (c *ConsortiumV2Contracts) IsSystemContract(address common.Address) bool {
@@ -556,10 +621,21 @@ func (c *ChainConfig) String() string {
 		stakingContract = c.ConsortiumV2Contracts.StakingContract
 	}
 
+	profileContract := common.HexToAddress("")
+	if c.ConsortiumV2Contracts != nil {
+		profileContract = c.ConsortiumV2Contracts.ProfileContract
+	}
+
+	finalityTrackingContract := common.HexToAddress("")
+	if c.ConsortiumV2Contracts != nil {
+		finalityTrackingContract = c.ConsortiumV2Contracts.FinalityTracking
+	}
+
 	chainConfigFmt := "{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v "
 	chainConfigFmt += "Petersburg: %v Istanbul: %v, Odysseus: %v, Fenix: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, "
 	chainConfigFmt += "Engine: %v, Blacklist Contract: %v, Fenix Validator Contract: %v, ConsortiumV2: %v, ConsortiumV2.RoninValidatorSet: %v, "
-	chainConfigFmt += "ConsortiumV2.SlashIndicator: %v, ConsortiumV2.StakingContract: %v, Puffy: %v, Buba: %v, Olek: %v}"
+	chainConfigFmt += "ConsortiumV2.SlashIndicator: %v, ConsortiumV2.StakingContract: %v, Puffy: %v, Buba: %v, Olek: %v, Shillin: %v, "
+	chainConfigFmt += "ConsortiumV2.ProfileContract: %v, ConsortiumV2.FinalityTracking: %v}"
 
 	return fmt.Sprintf(chainConfigFmt,
 		c.ChainID,
@@ -589,6 +665,9 @@ func (c *ChainConfig) String() string {
 		c.PuffyBlock,
 		c.BubaBlock,
 		c.OlekBlock,
+		c.ShillinBlock,
+		profileContract.Hex(),
+		finalityTrackingContract.Hex(),
 	)
 }
 
@@ -705,7 +784,11 @@ func (c *ChainConfig) IsOlek(num *big.Int) bool {
 // IsConsortiumV2 returns whether the num is equals to or larger than the consortiumV2 fork block.
 func (c *ChainConfig) IsComingFork(num *big.Int) bool {
 	return isForked(c.ComingForkBlock, num)
 }
+
+// IsShillin returns whether the num is equal to or larger than the shillin fork block.
+func (c *ChainConfig) IsShillin(num *big.Int) bool { + return isForked(c.ShillinBlock, num) } // CheckCompatible checks whether scheduled fork transitions have been imported @@ -826,6 +909,21 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.FenixBlock, newcfg.FenixBlock, head) { return newCompatError("Fenix fork block", c.FenixBlock, newcfg.FenixBlock) } + if isForkIncompatible(c.ConsortiumV2Block, newcfg.ConsortiumV2Block, head) { + return newCompatError("Consortium v2 fork block", c.ConsortiumV2Block, newcfg.ConsortiumV2Block) + } + if isForkIncompatible(c.PuffyBlock, newcfg.PuffyBlock, head) { + return newCompatError("Puffy fork block", c.PuffyBlock, newcfg.PuffyBlock) + } + if isForkIncompatible(c.BubaBlock, newcfg.BubaBlock, head) { + return newCompatError("Buba fork block", c.BubaBlock, newcfg.BubaBlock) + } + if isForkIncompatible(c.OlekBlock, newcfg.OlekBlock, head) { + return newCompatError("Olek fork block", c.OlekBlock, newcfg.OlekBlock) + } + if isForkIncompatible(c.ShillinBlock, newcfg.ShillinBlock, head) { + return newCompatError("Shillin fork block", c.ShillinBlock, newcfg.ShillinBlock) + } return nil } diff --git a/params/protocol_params.go b/params/protocol_params.go index 651f4bb956..63b3914575 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -153,6 +153,8 @@ const ( Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation + ValidateFinalityProofGas uint64 = 200000 // Gas for validating finality proof + // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529, // up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529 RefundQuotient uint64 = 2 diff --git a/params/version.go b/params/version.go index 2b34347d79..11339aab4d 100644 --- a/params/version.go +++ b/params/version.go @@ -22,8 +22,8 @@ import ( const ( VersionMajor = 2 // Major version component of the current release - VersionMinor = 5 // Minor version component of the current release - VersionPatch = 4 // Patch version component of the current release + VersionMinor = 6 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release VersionMeta = "" // Version metadata to append to the version string ) diff --git a/rpc/types.go b/rpc/types.go index ca52d474d9..5be87971b4 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -58,9 +58,10 @@ type jsonWriter interface { type BlockNumber int64 const ( - PendingBlockNumber = BlockNumber(-2) - LatestBlockNumber = BlockNumber(-1) - EarliestBlockNumber = BlockNumber(0) + FinalizedBlockNumber = BlockNumber(-3) + PendingBlockNumber = BlockNumber(-2) + LatestBlockNumber = BlockNumber(-1) + EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. 
It supports: @@ -85,6 +86,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "pending": *bn = PendingBlockNumber return nil + case "finalized": + *bn = FinalizedBlockNumber + return nil } blckNum, err := hexutil.DecodeUint64(input) @@ -109,6 +113,8 @@ func (bn BlockNumber) MarshalText() ([]byte, error) { return []byte("latest"), nil case PendingBlockNumber: return []byte("pending"), nil + case FinalizedBlockNumber: + return []byte("finalized"), nil default: return hexutil.Uint64(bn).MarshalText() } @@ -155,6 +161,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { bn := PendingBlockNumber bnh.BlockNumber = &bn return nil + case "finalized": + bn := FinalizedBlockNumber + bnh.BlockNumber = &bn + return nil default: if len(input) == 66 { hash := common.Hash{} diff --git a/script/gen_dns_txt.sh b/script/gen_dns_txt.sh new file mode 100755 index 0000000000..3e8487bfbe --- /dev/null +++ b/script/gen_dns_txt.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# Create the DNS TXT record from the list of nodes + +unset BOOTNODES +unset NETWORK +unset DOMAIN +unset IP_LIST +PROGNAME=$(basename "$0") + +USAGE="$PROGNAME -b bootnodes -n network -d domain [-i ip_list] + +where: + bootnodes: list of bootnodes to crawl from (comma separated) + network: ronin-mainnet | ronin-testnet to choose the network + domain: the DNS domain name + ip_list: the list of IPs to include in ENR tree (comma separated) +" + +set -e + +while getopts :hb:n:d:i: option; do + case $option in + h) + echo "$USAGE" + exit 0 + ;; + b) BOOTNODES=$OPTARG ;; + n) NETWORK=$OPTARG ;; + d) DOMAIN=$OPTARG ;; + i) IP_LIST=$OPTARG ;; + :) + echo "$PROGNAME: option requires an argument -- '$OPTARG'" >&2 + exit 1 + ;; + *) + echo "$PROGNAME: bad option -$OPTARG" >&2 + echo "Try '$PROGNAME -h' for more information" >&2 + exit 1 + ;; + esac +done +shift $((OPTIND - 1)) + +if [[ -z $BOOTNODES || -z $NETWORK || -z $DOMAIN ]]; then + echo "$PROGNAME: missing mandatory option" >&2 + echo "Try '$(basename "$0") -h' for more information" >&2 + exit 1 +fi + +DNS_DIR=dns_record + +if [[ -z $PRIVATE_KEY ]]; then + echo "The PRIVATE_KEY environment for signing DNS record must be provided" + exit 1 +fi + +if [[ -z $PASSWORD ]]; then + echo "The PASSWORD environment for decrypting the private key must be provided" + exit 1 +fi + +echo "$PASSWORD" > password +echo "$PRIVATE_KEY" > private_key +unset PASSWORD +unset PRIVATE_KEY + +set -x +# Create an encrypted keyfile.json +ethkey generate --passwordfile password --privatekey private_key + +devp2p discv4 crawl --timeout 1m --bootnodes $BOOTNODES all_nodes.json + +mkdir -p $DNS_DIR + +set +x +IP_LIST_PARAMS="" +if [[ ! -z $IP_LIST ]]; then + IP_LIST_PARAMS="-ip-list $IP_LIST" +fi +set -x + +devp2p nodeset filter all_nodes.json -eth-network $NETWORK $IP_LIST_PARAMS > $DNS_DIR/nodes.json +devp2p dns sign $DNS_DIR keyfile.json password --domain $DOMAIN +devp2p dns to-txt $DNS_DIR $DNS_DIR/txt_record.json + +echo "Cleanup files" +rm private_key +rm password +rm keyfile.json
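A usage sketch for script/gen_dns_txt.sh above. Every value is a placeholder; the flags, the PRIVATE_KEY and PASSWORD environment variables, and the dns_record output directory are taken from the script itself.

# Sign and export the ENR tree for the Ronin testnet, crawling from a single
# bootnode and restricting the published records to two known IPs.
# Assumes the devp2p and ethkey tools are on PATH.
export PRIVATE_KEY='<hex-encoded key used to sign the DNS tree>'
export PASSWORD='<password used to encrypt the generated keyfile>'
./script/gen_dns_txt.sh \
    -b enode://<bootnode-pubkey>@<bootnode-ip>:30303 \
    -n ronin-testnet \
    -d <your-dns-domain> \
    -i <ip1>,<ip2>
# The TXT records to publish end up in dns_record/txt_record.json.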
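The new "finalized" tag added to rpc/types.go can be exercised over JSON-RPC, assuming a node built from this branch exposes HTTP RPC on the default port and its eth_ handlers resolve the new FinalizedBlockNumber (that wiring is outside these hunks):

# Request the most recently fast-finalized block; before this change the
# "finalized" tag was rejected by BlockNumber.UnmarshalJSON.
curl -s -H 'Content-Type: application/json' \
    -d '{"jsonrpc":"2.0","id":1,"method":"eth_getBlockByNumber","params":["finalized", false]}' \
    http://127.0.0.1:8545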