From 9da00cd8c0a4a5124d47420777b4fa76b0bb90a8 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 29 Apr 2024 17:29:07 +0200 Subject: [PATCH 01/73] WIP: manually bring over RIP-7560 related code; compiles but does not run --- core/state_processor_rip7560.go | 653 +++++++++++++++++++++++++ core/state_transition.go | 2 + core/txpool/blobpool/blobpool.go | 15 + core/txpool/legacypool/legacypool.go | 17 + core/txpool/rip7560pool/rip7560pool.go | 273 +++++++++++ core/txpool/subpool.go | 6 + core/txpool/txpool_rip7560.go | 46 ++ core/types/transaction.go | 7 + core/types/tx_rip7560.go | 221 +++++++++ eth/api_backend_rip7560.go | 15 + eth/backend.go | 9 +- internal/ethapi/backend.go | 4 + internal/ethapi/rip7560api.go | 62 +++ miner/worker.go | 24 + params/config.go | 5 + 15 files changed, 1358 insertions(+), 1 deletion(-) create mode 100644 core/state_processor_rip7560.go create mode 100644 core/txpool/rip7560pool/rip7560pool.go create mode 100644 core/txpool/txpool_rip7560.go create mode 100644 core/types/tx_rip7560.go create mode 100644 eth/api_backend_rip7560.go create mode 100644 internal/ethapi/rip7560api.go diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go new file mode 100644 index 000000000000..c0c3d9f7ae7a --- /dev/null +++ b/core/state_processor_rip7560.go @@ -0,0 +1,653 @@ +package core + +import ( + "encoding/binary" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" + "math/big" + "strings" +) + +type ValidationPhaseResult struct { + TxIndex int + Tx *types.Transaction + TxHash common.Hash + PaymasterContext []byte + DeploymentUsedGas uint64 + ValidationUsedGas uint64 + PmValidationUsedGas uint64 + SenderValidAfter uint64 + SenderValidUntil uint64 + 
PmValidAfter uint64 + PmValidUntil uint64 + IsEOA bool +} + +// HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions and return +// the number of handled transactions +// the transactions array must start with the RIP-7560 transaction +func HandleRip7560Transactions(transactions []*types.Transaction, index int, statedb *state.StateDB, coinbase *common.Address, header *types.Header, gp *GasPool, chainConfig *params.ChainConfig, bc ChainContext, cfg vm.Config) ([]*types.Transaction, types.Receipts, []*types.Log, error) { + validatedTransactions := make([]*types.Transaction, 0) + receipts := make([]*types.Receipt, 0) + allLogs := make([]*types.Log, 0) + + i := index + for { + if i >= len(transactions) { + break + } + if transactions[i].Type() != types.Rip7560Type { + break + } + iTransactions, iReceipts, iLogs, err := handleRip7560Transactions(transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg) + if err != nil { + return nil, nil, nil, err + } + validatedTransactions = append(validatedTransactions, iTransactions...) + receipts = append(receipts, iReceipts...) + allLogs = append(allLogs, iLogs...) 
+ } + return validatedTransactions, receipts, allLogs, nil +} + +func handleRip7560Transactions(transactions []*types.Transaction, index int, statedb *state.StateDB, coinbase *common.Address, header *types.Header, gp *GasPool, chainConfig *params.ChainConfig, bc ChainContext, cfg vm.Config) ([]*types.Transaction, types.Receipts, []*types.Log, error) { + validationPhaseResults := make([]*ValidationPhaseResult, 0) + validatedTransactions := make([]*types.Transaction, 0) + receipts := make([]*types.Receipt, 0) + allLogs := make([]*types.Log, 0) + signer := types.MakeSigner(chainConfig, header.Number, header.Time) + for i, tx := range transactions[index:] { + if tx.Type() != types.Rip7560Type { + break + } + + aatx := tx.Rip7560TransactionData() + isEoa, err := isTransactionEOA(tx, statedb, signer) + if err != nil { + return nil, nil, nil, err + } + statedb.SetTxContext(tx.Hash(), index+i) + err = BuyGasRip7560Transaction(aatx, statedb) + var vpr *ValidationPhaseResult + if isEoa { + blockContext := NewEVMBlockContext(header, bc, coinbase) + tmpMsg, err := TransactionToMessage(tx, signer, header.BaseFee) + txContext := NewEVMTxContext(tmpMsg) + evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) + //signer := types.MakeSigner(chainConfig, header.Number, header.Time) + signingHash := signer.Hash(tx) + vpr, err = validateRip7560TransactionFromEOA(tx, signingHash, statedb, evm, coinbase, header, gp, chainConfig) + if err != nil { + return nil, nil, nil, err + } + } else { + if err != nil { + return nil, nil, nil, err + } + vpr, err = ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) + if err != nil { + return nil, nil, nil, err + } + } + validationPhaseResults = append(validationPhaseResults, vpr) + validatedTransactions = append(validatedTransactions, tx) + } + for i, vpr := range validationPhaseResults { + + // TODO: this will miss all validation phase events - pass in 'vpr' + statedb.SetTxContext(vpr.Tx.Hash(), i) + 
+ receipt, err := ApplyRip7560ExecutionPhase(chainConfig, vpr, bc, coinbase, gp, statedb, header, cfg) + + if err != nil { + return nil, nil, nil, err + } + receipts = append(receipts, receipt) + allLogs = append(allLogs, receipt.Logs...) + } + return validatedTransactions, receipts, allLogs, nil +} + +// BuyGasRip7560Transaction +// todo: move to a suitable interface, whatever that is +// todo 2: maybe handle the "shared gas pool" situation instead of just overriding it completely? +func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB) error { + gasLimit := st.Gas + st.ValidationGas + st.PaymasterGas + st.PostOpGas + mgval := new(uint256.Int).SetUint64(gasLimit) + gasFeeCap, _ := uint256.FromBig(st.GasFeeCap) + mgval = mgval.Mul(mgval, gasFeeCap) + balanceCheck := new(uint256.Int).Set(mgval) + + chargeFrom := *st.Sender + + if len(st.PaymasterData) >= 20 { + chargeFrom = [20]byte(st.PaymasterData[:20]) + } + + if have, want := state.GetBalance(chargeFrom), balanceCheck; have.Cmp(want) < 0 { + return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) + } + + state.SubBalance(chargeFrom, mgval, 0) + return nil +} + +// TODO: not needed with subtype - only use to validate transaction, maybe +func isTransactionEOA(tx *types.Transaction, statedb *state.StateDB, signer types.Signer) (bool, error) { + aatx := tx.Rip7560TransactionData() + senderHasCode := statedb.GetCodeSize(*aatx.Sender) != 0 || len(aatx.DeployerData) != 0 + if senderHasCode { + return false, nil + } + address, err := signer.Sender(tx) + if err != nil { + return false, err + } + if address.Cmp(*tx.Rip7560TransactionData().Sender) != 0 { + return false, errors.New("recovered signature does not match the claimed EOA sender") + } + return true, nil +} + +func validateRip7560TransactionFromEOA(tx *types.Transaction, signingHash common.Hash, statedb *state.StateDB, evm *vm.EVM, coinbase *common.Address, header *types.Header, gp 
*GasPool, chainConfig *params.ChainConfig) (*ValidationPhaseResult, error) { + // TODO: paymaste is actually optional for eoa-type-4 -> check paymaster data len() + paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + if err != nil { + return nil, err + } + err = validateValidityTimeRange(header.Time, pmValidAfter, pmValidUntil) + if err != nil { + return nil, err + } + vpr := &ValidationPhaseResult{ + Tx: tx, + TxHash: tx.Hash(), + PaymasterContext: paymasterContext, + DeploymentUsedGas: 0, + ValidationUsedGas: 0, + PmValidationUsedGas: pmValidationUsedGas, + SenderValidAfter: 0, + SenderValidUntil: 0, + PmValidAfter: pmValidAfter, + PmValidUntil: pmValidUntil, + IsEOA: true, + } + return vpr, nil +} + +func ApplyRip7560FrameMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) { + return NewRip7560StateTransition(evm, msg, gp).TransitionDb() +} + +// NewRip7560StateTransition initialises and returns a new state transition object. +func NewRip7560StateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition { + return &StateTransition{ + gp: gp, + evm: evm, + msg: msg, + state: evm.StateDB, + rip7560Frame: true, + } +} + +// GetRip7560AccountNonce reads the two-dimensional RIP-7560 nonce from the given blockchain state +func GetRip7560AccountNonce(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config, sender common.Address, nonceKey *big.Int) uint64 { + + // todo: this is a copy paste of 5 lines that need 8 parameters to run, wtf? 
+ blockContext := NewEVMBlockContext(header, bc, author) + message, err := TransactionToMessage(tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) + txContext := NewEVMTxContext(message) + vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg) + vmenv.Reset(txContext, statedb) // TODO what does this 'reset' do? + + from := common.HexToAddress("0x0000000000000000000000000000000000000000") + // todo: read NM address from global config + nonceManager := common.HexToAddress("0xdebc121d1b09bc03ff57fa1f96514d04a1f0f59d") + fromBigNonceKey256, _ := uint256.FromBig(nonceKey) + key := make([]byte, 24) + fromBigNonceKey256.WriteToSlice(key) + nonceManagerData := make([]byte, 0) + nonceManagerData = append(nonceManagerData[:], sender.Bytes()...) + nonceManagerData = append(nonceManagerData[:], key...) + + nonceManagerMsg := &Message{ + From: from, + To: &nonceManager, + Value: big.NewInt(0), + GasLimit: 100000, + GasPrice: big.NewInt(875000000), + GasFeeCap: big.NewInt(875000000), + GasTipCap: big.NewInt(875000000), + Data: nonceManagerData, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + } + resultNonceManager, err := ApplyRip7560FrameMessage(vmenv, nonceManagerMsg, gp) + if err != nil { + // todo: handle + return 777 + } + if resultNonceManager.Err != nil { + return 888 + } + if resultNonceManager.ReturnData == nil { + return 999 + } + return big.NewInt(0).SetBytes(resultNonceManager.ReturnData).Uint64() +} + +func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { + /*** Nonce Manger Frame ***/ + nonceManagerMsg := prepareNonceManagerMessage(tx, chainConfig) + + blockContext := NewEVMBlockContext(header, bc, author) + txContext := NewEVMTxContext(nonceManagerMsg) + txContext.Origin = 
*tx.Rip7560TransactionData().Sender + evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) + + resultNonceManager, err := ApplyRip7560FrameMessage(evm, nonceManagerMsg, gp) + if err != nil { + return nil, err + } + statedb.IntermediateRoot(true) + if resultNonceManager.Err != nil { + return nil, resultNonceManager.Err + } + + /*** Deployer Frame ***/ + deployerMsg := prepareDeployerMessage(tx, chainConfig) + var deploymentUsedGas uint64 + if deployerMsg != nil { + resultDeployer, err := ApplyRip7560FrameMessage(evm, deployerMsg, gp) + if err != nil { + return nil, err + } + statedb.IntermediateRoot(true) + if resultDeployer.Failed() { + // TODO: bubble up the inner error message to the user, if possible + return nil, errors.New("account deployment failed - invalid transaction") + } + deploymentUsedGas = resultDeployer.UsedGas + } + + /*** Account Validation Frame ***/ + signer := types.MakeSigner(chainConfig, header.Number, header.Time) + signingHash := signer.Hash(tx) + accountValidationMsg, err := prepareAccountValidationMessage(tx, chainConfig, signingHash, deploymentUsedGas) + resultAccountValidation, err := ApplyRip7560FrameMessage(evm, accountValidationMsg, gp) + if err != nil { + return nil, err + } + statedb.IntermediateRoot(true) + if resultAccountValidation.Err != nil { + return nil, resultAccountValidation.Err + } + validAfter, validUntil, err := validateAccountReturnData(resultAccountValidation.ReturnData) + if err != nil { + return nil, err + } + err = validateValidityTimeRange(header.Time, validAfter, validUntil) + if err != nil { + return nil, err + } + + paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + vpr := &ValidationPhaseResult{ + Tx: tx, + TxHash: tx.Hash(), + PaymasterContext: paymasterContext, + DeploymentUsedGas: deploymentUsedGas, + ValidationUsedGas: resultAccountValidation.UsedGas, + PmValidationUsedGas: 
pmValidationUsedGas, + SenderValidAfter: validAfter, + SenderValidUntil: validUntil, + PmValidAfter: pmValidAfter, + PmValidUntil: pmValidUntil, + IsEOA: false, + } + + return vpr, nil +} + +func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, uint64, uint64, uint64, error) { + /*** Paymaster Validation Frame ***/ + var pmValidationUsedGas uint64 + var paymasterContext []byte + var pmValidAfter uint64 + var pmValidUntil uint64 + paymasterMsg, err := preparePaymasterValidationMessage(tx, chainConfig, signingHash) + if err != nil { + return nil, 0, 0, 0, err + } + if paymasterMsg != nil { + resultPm, err := ApplyRip7560FrameMessage(evm, paymasterMsg, gp) + if err != nil { + return nil, 0, 0, 0, err + } + statedb.IntermediateRoot(true) + if resultPm.Failed() { + return nil, 0, 0, 0, errors.New("paymaster validation failed - invalid transaction") + } + pmValidationUsedGas = resultPm.UsedGas + paymasterContext, pmValidAfter, pmValidUntil, err = validatePaymasterReturnData(resultPm.ReturnData) + if err != nil { + return nil, 0, 0, 0, err + } + err = validateValidityTimeRange(header.Time, pmValidAfter, pmValidUntil) + if err != nil { + return nil, 0, 0, 0, err + } + } + return paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, nil +} + +func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) (*ExecutionResult, error) { + var paymasterPostOpResult *ExecutionResult + paymasterPostOpMsg, err := preparePostOpMessage(vpr, evm.ChainConfig(), executionResult) + if err != nil { + return nil, err + } + paymasterPostOpResult, err = ApplyRip7560FrameMessage(evm, paymasterPostOpMsg, gp) + if err != nil { + return nil, err + } + // TODO: revert the execution phase changes + return paymasterPostOpResult, nil +} + +func 
ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhaseResult, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, cfg vm.Config) (*types.Receipt, error) { + + // TODO: snapshot EVM - we will revert back here if postOp fails + + blockContext := NewEVMBlockContext(header, bc, author) + message, err := TransactionToMessage(vpr.Tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) + txContext := NewEVMTxContext(message) + txContext.Origin = *vpr.Tx.Rip7560TransactionData().Sender + evm := vm.NewEVM(blockContext, txContext, statedb, config, cfg) + + var accountExecutionMsg *Message + if vpr.IsEOA { + accountExecutionMsg, err = prepareEOATargetExecutionMessage(vpr.Tx) + if err != nil { + return nil, err + } + } else { + accountExecutionMsg = prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) + } + executionResult, err := ApplyRip7560FrameMessage(evm, accountExecutionMsg, gp) + if err != nil { + return nil, err + } + root := statedb.IntermediateRoot(true).Bytes() + var paymasterPostOpResult *ExecutionResult + if len(vpr.PaymasterContext) != 0 { + paymasterPostOpResult, err = applyPaymasterPostOpFrame(vpr, executionResult, evm, gp, statedb, header) + root = statedb.IntermediateRoot(true).Bytes() + } + if err != nil { + return nil, err + } + + cumulativeGasUsed := + vpr.ValidationUsedGas + + vpr.DeploymentUsedGas + + vpr.PmValidationUsedGas + + executionResult.UsedGas + if paymasterPostOpResult != nil { + cumulativeGasUsed += + paymasterPostOpResult.UsedGas + } + + receipt := &types.Receipt{Type: vpr.Tx.Type(), PostState: root, CumulativeGasUsed: cumulativeGasUsed} + + // Set the receipt logs and create the bloom filter. 
+ receipt.Logs = statedb.GetLogs(vpr.Tx.Hash(), header.Number.Uint64(), header.Hash()) + + if executionResult.Failed() || (paymasterPostOpResult != nil && paymasterPostOpResult.Failed()) { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + return receipt, err +} + +func prepareNonceManagerMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig) *Message { + tx := baseTx.Rip7560TransactionData() + key := make([]byte, 32) + fromBig, _ := uint256.FromBig(tx.BigNonce) + fromBig.WriteToSlice(key) + + nonceManagerData := make([]byte, 0) + nonceManagerData = append(nonceManagerData[:], tx.Sender.Bytes()...) + nonceManagerData = append(nonceManagerData[:], key...) + return &Message{ + From: chainConfig.EntryPointAddress, + To: &chainConfig.NonceManagerAddress, + Value: big.NewInt(0), + GasLimit: 100000, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: nonceManagerData, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + } +} + +func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { + tx := baseTx.Rip7560TransactionData() + if len(tx.DeployerData) < 20 { + return nil + } + var deployerAddress common.Address = [20]byte(tx.DeployerData[0:20]) + return &Message{ + From: config.DeployerCallerAddress, + To: &deployerAddress, + Value: big.NewInt(0), + GasLimit: tx.ValidationGas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: tx.DeployerData[20:], + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + } +} + +func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, deploymentUsedGas uint64) (*Message, error) { + tx := baseTx.Rip7560TransactionData() + jsondata := `[ + {"type":"function","name":"validateTransaction","inputs": [{"name": 
"version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} + ]` + + validateTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) + if err != nil { + return nil, err + } + txAbiEncoding, err := tx.AbiEncode() + validateTransactionData, err := validateTransactionAbi.Pack("validateTransaction", big.NewInt(0), signingHash, txAbiEncoding) + return &Message{ + From: chainConfig.EntryPointAddress, + To: tx.Sender, + Value: big.NewInt(0), + GasLimit: tx.ValidationGas - deploymentUsedGas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: validateTransactionData, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + }, nil +} + +func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { + tx := baseTx.Rip7560TransactionData() + if len(tx.PaymasterData) < 20 { + return nil, nil + } + var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) + jsondata := `[ + {"type":"function","name":"validatePaymasterTransaction","inputs": [{"name": "version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} + ]` + + validateTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) + txAbiEncoding, err := tx.AbiEncode() + data, err := validateTransactionAbi.Pack("validatePaymasterTransaction", big.NewInt(0), signingHash, txAbiEncoding) + + if err != nil { + return nil, err + } + return &Message{ + From: config.EntryPointAddress, + To: &paymasterAddress, + Value: big.NewInt(0), + GasLimit: tx.PaymasterGas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: data, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + }, nil +} + +func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { + tx := 
baseTx.Rip7560TransactionData() + return &Message{ + From: config.EntryPointAddress, + To: tx.Sender, + Value: big.NewInt(0), + GasLimit: tx.Gas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: tx.Data, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + } +} + +func prepareEOATargetExecutionMessage(baseTx *types.Transaction) (*Message, error) { + tx := baseTx.Rip7560TransactionData() + if len(tx.Data) < 20 { + return nil, errors.New("RIP-7560 sent by an EOA but the transaction data is too short") + } + var to common.Address = [20]byte(tx.Data[0:20]) + return &Message{ + From: *tx.Sender, + To: &to, + Value: tx.Value, + GasLimit: tx.Gas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: tx.Data[20:], + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + }, nil +} + +func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainConfig, executionResult *ExecutionResult) (*Message, error) { + if len(vpr.PaymasterContext) == 0 { + return nil, nil + } + + tx := vpr.Tx.Rip7560TransactionData() + jsondata := `[ + {"type":"function","name":"postPaymasterTransaction","inputs": [{"name": "success","type": "bool"},{"name": "actualGasCost","type": "uint256"},{"name": "context","type": "bytes"}]} + ]` + postPaymasterTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) + if err != nil { + return nil, err + } + postOpData, err := postPaymasterTransactionAbi.Pack("postPaymasterTransaction", true, big.NewInt(0), vpr.PaymasterContext) + if err != nil { + return nil, err + } + var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) + return &Message{ + From: chainConfig.EntryPointAddress, + To: &paymasterAddress, + Value: big.NewInt(0), + GasLimit: tx.PaymasterGas - executionResult.UsedGas, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: 
postOpData, + AccessList: tx.AccessList, + SkipAccountChecks: true, + IsRip7560Frame: true, + }, nil +} + +func validateAccountReturnData(data []byte) (uint64, uint64, error) { + MAGIC_VALUE_SENDER := uint32(0xbf45c166) + if len(data) != 32 { + return 0, 0, errors.New("invalid account return data length") + } + magicExpected := binary.BigEndian.Uint32(data[:4]) + if magicExpected != MAGIC_VALUE_SENDER { + return 0, 0, errors.New("account did not return correct MAGIC_VALUE") + } + validAfter := binary.BigEndian.Uint64(data[4:12]) + validUntil := binary.BigEndian.Uint64(data[12:20]) + return validAfter, validUntil, nil +} + +func validatePaymasterReturnData(data []byte) ([]byte, uint64, uint64, error) { + MAGIC_VALUE_PAYMASTER := uint32(0xe0e6183a) + if len(data) < 4 { + return nil, 0, 0, errors.New("invalid paymaster return data length") + } + magicExpected := binary.BigEndian.Uint32(data[:4]) + if magicExpected != MAGIC_VALUE_PAYMASTER { + return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") + } + + jsondata := `[ + {"type":"function","name":"validatePaymasterTransaction","outputs": [{"name": "context","type": "bytes"},{"name": "validUntil","type": "uint256"},{"name": "validAfter","type": "uint256"}]} + ]` + validatePaymasterTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) + if err != nil { + // todo: wrap error message + return nil, 0, 0, err + } + decodedPmReturnData, err := validatePaymasterTransactionAbi.Unpack("validatePaymasterTransaction", data[4:]) + if err != nil { + return nil, 0, 0, err + } + context := decodedPmReturnData[0].([]byte) + validAfter := decodedPmReturnData[1].(*big.Int) + validUntil := decodedPmReturnData[2].(*big.Int) + return context, validAfter.Uint64(), validUntil.Uint64(), nil +} + +func validateValidityTimeRange(time uint64, validAfter uint64, validUntil uint64) error { + if validUntil == 0 && validAfter == 0 { + return nil + } + if validUntil < validAfter { + return errors.New("RIP-7560 transaction 
validity range invalid") + } + if time > validUntil { + return errors.New("RIP-7560 transaction validity expired") + } + if time < validAfter { + return errors.New("RIP-7560 transaction validity not reached yet") + } + return nil +} diff --git a/core/state_transition.go b/core/state_transition.go index a52e24dc4395..70b642e74798 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -146,6 +146,7 @@ type Message struct { // account nonce in state. It also disables checking that the sender is an EOA. // This field will be set to true for operations like RPC eth_call. SkipAccountChecks bool + IsRip7560Frame bool } // TransactionToMessage converts a transaction into a Message. @@ -213,6 +214,7 @@ type StateTransition struct { initialGas uint64 state vm.StateDB evm *vm.EVM + rip7560Frame bool } // NewStateTransition initialises and returns a new state transition object. diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index f1c2c10fc965..f1b965dedb6e 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -1653,3 +1653,18 @@ func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus { } return txpool.TxStatusUnknown } + +func (pool *BlobPool) SubmitRip7560Bundle(_ *types.ExternallyReceivedBundle) error { + // nothing to do here + return nil +} + +func (pool *BlobPool) GetRip7560BundleStatus(_ common.Hash) (*types.BundleReceipt, error) { + // nothing to do here + return nil, nil +} + +func (pool *BlobPool) PendingRip7560Bundle() (*types.ExternallyReceivedBundle, error) { + // nothing to do here + return nil, nil +} diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 4e1d26acf405..e5a59cf680fd 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -117,6 +117,8 @@ type BlockChain interface { // StateAt returns a state database for a given root hash (generally the head). 
StateAt(root common.Hash) (*state.StateDB, error) + + GetReceiptsByHash(hash common.Hash) types.Receipts } // Config are the configuration parameters of the transaction pool. @@ -1959,3 +1961,18 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { func numSlots(tx *types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) } + +func (pool *LegacyPool) SubmitRip7560Bundle(_ *types.ExternallyReceivedBundle) error { + // nothing to do here + return nil +} + +func (pool *LegacyPool) GetRip7560BundleStatus(_ common.Hash) (*types.BundleReceipt, error) { + // nothing to do here + return nil, nil +} + +func (pool *LegacyPool) PendingRip7560Bundle() (*types.ExternallyReceivedBundle, error) { + // nothing to do here + return nil, nil +} diff --git a/core/txpool/rip7560pool/rip7560pool.go b/core/txpool/rip7560pool/rip7560pool.go new file mode 100644 index 000000000000..3c6670d34e4a --- /dev/null +++ b/core/txpool/rip7560pool/rip7560pool.go @@ -0,0 +1,273 @@ +package rip7560pool + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "math/big" + "sync" + "sync/atomic" +) + +type Config struct { + MaxBundleSize uint + MaxBundleGas uint +} + +// Rip7560BundlerPool is the transaction pool dedicated to RIP-7560 AA transactions. +// This implementation relies on an external bundler process to perform most of the hard work. 
+type Rip7560BundlerPool struct { + config Config + chain legacypool.BlockChain + txFeed event.Feed + currentHead atomic.Pointer[types.Header] // Current head of the blockchain + + pendingBundles []*types.ExternallyReceivedBundle + includedBundles map[common.Hash]*types.BundleReceipt + + mu sync.Mutex + + coinbase common.Address +} + +func (pool *Rip7560BundlerPool) Init(_ uint64, head *types.Header, _ txpool.AddressReserver) error { + pool.pendingBundles = make([]*types.ExternallyReceivedBundle, 0) + pool.includedBundles = make(map[common.Hash]*types.BundleReceipt) + pool.currentHead.Store(head) + return nil +} + +func (pool *Rip7560BundlerPool) Close() error { + return nil +} + +func (pool *Rip7560BundlerPool) Reset(oldHead, newHead *types.Header) { + pool.mu.Lock() + defer pool.mu.Unlock() + + newIncludedBundles := pool.gatherIncludedBundlesStats(newHead) + for _, included := range newIncludedBundles { + pool.includedBundles[included.BundleHash] = included + } + + pendingBundles := make([]*types.ExternallyReceivedBundle, 0, len(pool.pendingBundles)) + for _, bundle := range pool.pendingBundles { + nextBlock := big.NewInt(0).Add(newHead.Number, big.NewInt(1)) + if bundle.ValidForBlock.Cmp(nextBlock) == 0 { + pendingBundles = append(pendingBundles, bundle) + } + } + pool.pendingBundles = pendingBundles + pool.currentHead.Store(newHead) +} + +// For simplicity, this function assumes 'Reset' called for each new block sequentially. +func (pool *Rip7560BundlerPool) gatherIncludedBundlesStats(newHead *types.Header) map[common.Hash]*types.BundleReceipt { + // 1. Is there a bundle included in the block? 
+ + // note that in 'clique' mode Coinbase is always set to 0x000...000 + if newHead.Coinbase.Cmp(pool.coinbase) != 0 && newHead.Coinbase.Cmp(common.Address{}) != 0 { + // not our block + return nil + } + + // get all transaction hashes in block + add := pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) + block := add.Transactions() + + receipts := pool.chain.GetReceiptsByHash(add.Hash()) + // match transactions in block to bundle ? + + includedBundles := make(map[common.Hash]*types.BundleReceipt) + + // 'pendingBundles' length is expected to be single digits, probably a single bundle in most cases + for _, bundle := range pool.pendingBundles { + if len(block) < len(bundle.Transactions) { + // this bundle does not even fit this block + continue + } + for i := 0; i < len(block); i++ { + transactions := make(types.Transactions, 0) + for j := 0; j < len(bundle.Transactions); j++ { + blockTx := block[i] + bundleTx := bundle.Transactions[j] + if bundleTx.Hash().Cmp(blockTx.Hash()) == 0 { + // tx hash has matched + transactions = append(transactions, blockTx) + if j == len(bundle.Transactions)-1 { + // FOUND BUNDLE IN BLOCK + receipt := createBundleReceipt(add, bundle.BundleHash, transactions, receipts) + includedBundles[bundle.BundleHash] = receipt + } else { + // let's see if next tx in bundle matches + i++ + } + } + } + } + + } + return includedBundles +} + +func createBundleReceipt(block *types.Block, BundleHash common.Hash, transactions types.Transactions, blockReceipts types.Receipts) *types.BundleReceipt { + receipts := make(types.Receipts, 0) + +OuterLoop: + for _, transaction := range transactions { + for _, receipt := range blockReceipts { + if receipt.TxHash == transaction.Hash() { + receipts = append(receipts, receipt) + continue OuterLoop + } + } + panic("receipt not found for transaction") + } + + var gasUsed uint64 = 0 + var gasPaidPriority = big.NewInt(0) + + for _, receipt := range receipts { + gasUsed += receipt.GasUsed + priorityFeePerGas := 
big.NewInt(0).Sub(receipt.EffectiveGasPrice, block.BaseFee()) + priorityFeePaid := big.NewInt(0).Mul(big.NewInt(int64(gasUsed)), priorityFeePerGas) + gasPaidPriority = big.NewInt(0).Add(gasPaidPriority, priorityFeePaid) + } + + return &types.BundleReceipt{ + BundleHash: BundleHash, + Count: uint64(len(transactions)), + Status: 0, + BlockNumber: block.NumberU64(), + BlockHash: block.Hash(), + TransactionReceipts: receipts, + GasUsed: gasUsed, + GasPaidPriority: gasPaidPriority, + BlockTimestamp: block.Time(), + } +} + +// SetGasTip is ignored by the External Bundler AA sub pool. +func (pool *Rip7560BundlerPool) SetGasTip(_ *big.Int) {} + +func (pool *Rip7560BundlerPool) Has(hash common.Hash) bool { + pool.mu.Lock() + defer pool.mu.Unlock() + + tx := pool.Get(hash) + return tx != nil +} + +func (pool *Rip7560BundlerPool) Get(hash common.Hash) *types.Transaction { + pool.mu.Lock() + defer pool.mu.Unlock() + + for _, bundle := range pool.pendingBundles { + for _, tx := range bundle.Transactions { + if tx.Hash().Cmp(hash) == 0 { + return tx + } + } + } + return nil +} + +func (pool *Rip7560BundlerPool) Add(_ []*types.Transaction, _ bool, _ bool) []error { + return nil +} + +func (pool *Rip7560BundlerPool) Pending(_ txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { + return nil +} + +func (pool *Rip7560BundlerPool) PendingRip7560Bundle() (*types.ExternallyReceivedBundle, error) { + pool.mu.Lock() + defer pool.mu.Unlock() + + bundle := pool.selectExternalBundle() + return bundle, nil +} + +// SubscribeTransactions is not needed for the External Bundler AA sub pool and 'ch' will never be sent anything. +func (pool *Rip7560BundlerPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, _ bool) event.Subscription { + return pool.txFeed.Subscribe(ch) +} + +// Nonce is only used from 'GetPoolNonce' which is not relevant for AA transactions. 
+func (pool *Rip7560BundlerPool) Nonce(_ common.Address) uint64 { + return 0 +} + +// Stats function not implemented for the External Bundler AA sub pool. +func (pool *Rip7560BundlerPool) Stats() (int, int) { + return 0, 0 +} + +// Content function not implemented for the External Bundler AA sub pool. +func (pool *Rip7560BundlerPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { + return nil, nil +} + +// ContentFrom function not implemented for the External Bundler AA sub pool. +func (pool *Rip7560BundlerPool) ContentFrom(_ common.Address) ([]*types.Transaction, []*types.Transaction) { + return nil, nil +} + +// Locals are not necessary for AA Pool +func (pool *Rip7560BundlerPool) Locals() []common.Address { + return []common.Address{} +} + +func (pool *Rip7560BundlerPool) Status(_ common.Hash) txpool.TxStatus { + panic("implement me") +} + +// New creates a new RIP-7560 Account Abstraction Bundler transaction pool. +func New(config Config, chain legacypool.BlockChain, coinbase common.Address) *Rip7560BundlerPool { + return &Rip7560BundlerPool{ + config: config, + chain: chain, + coinbase: coinbase, + } +} + +// Filter rejects all individual transactions for External Bundler AA sub pool. 
+func (pool *Rip7560BundlerPool) Filter(_ *types.Transaction) bool { + return false +} + +func (pool *Rip7560BundlerPool) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error { + pool.mu.Lock() + defer pool.mu.Unlock() + + currentBlock := pool.currentHead.Load().Number + nextBlock := big.NewInt(0).Add(currentBlock, big.NewInt(1)) + log.Error("RIP-7560 bundle submitted", "validForBlock", bundle.ValidForBlock.String(), "nextBlock", nextBlock.String()) + pool.pendingBundles = append(pool.pendingBundles, bundle) + if nextBlock.Cmp(bundle.ValidForBlock) == 0 { + pool.txFeed.Send(core.NewTxsEvent{Txs: bundle.Transactions}) + } + return nil +} + +func (pool *Rip7560BundlerPool) GetRip7560BundleStatus(hash common.Hash) (*types.BundleReceipt, error) { + pool.mu.Lock() + defer pool.mu.Unlock() + + return pool.includedBundles[hash], nil +} + +// Simply returns the bundle with the highest promised revenue by fully trusting the bundler-provided value. +func (pool *Rip7560BundlerPool) selectExternalBundle() *types.ExternallyReceivedBundle { + var selectedBundle *types.ExternallyReceivedBundle + for _, bundle := range pool.pendingBundles { + if selectedBundle == nil || selectedBundle.ExpectedRevenue.Cmp(bundle.ExpectedRevenue) == -1 { + selectedBundle = bundle + } + } + return selectedBundle +} diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index 9881ed1b8f96..246a94bbeb2c 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -162,4 +162,10 @@ type SubPool interface { // Status returns the known status (unknown/pending/queued) of a transaction // identified by their hashes. 
Status(hash common.Hash) TxStatus + + // RIP-7560 specific subpool functions, other subpools should ignore these + + SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error + GetRip7560BundleStatus(hash common.Hash) (*types.BundleReceipt, error) + PendingRip7560Bundle() (*types.ExternallyReceivedBundle, error) } diff --git a/core/txpool/txpool_rip7560.go b/core/txpool/txpool_rip7560.go new file mode 100644 index 000000000000..5e5ef8f8e7ee --- /dev/null +++ b/core/txpool/txpool_rip7560.go @@ -0,0 +1,46 @@ +package txpool + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// SubmitBundle inserts the entire bundle of Type 4 transactions into the relevant pool. +func (p *TxPool) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error { + // todo: we cannot 'filter-out' the AA pool so just passing to all pools - only AA pool has code in SubmitBundle + for _, subpool := range p.subpools { + err := subpool.SubmitRip7560Bundle(bundle) + if err != nil { + return err + } + } + return nil +} + +func (p *TxPool) GetRip7560BundleStatus(hash common.Hash) (*types.BundleReceipt, error) { + // todo: we cannot 'filter-out' the AA pool so just passing to all pools - only AA pool has code in SubmitBundle + for _, subpool := range p.subpools { + bundleStats, err := subpool.GetRip7560BundleStatus(hash) + if err != nil { + return nil, err + } + if bundleStats != nil { + return bundleStats, nil + } + } + return nil, nil +} + +func (p *TxPool) PendingRip7560Bundle() (*types.ExternallyReceivedBundle, error) { + // todo: we cannot 'filter-out' the AA pool so just passing to all pools - only AA pool has code in PendingBundle + for _, subpool := range p.subpools { + pendingBundle, err := subpool.PendingRip7560Bundle() + if err != nil { + return nil, err + } + if pendingBundle != nil { + return pendingBundle, nil + } + } + return nil, nil +} diff --git a/core/types/transaction.go b/core/types/transaction.go index 
6a27ecbfecee..e8b389d8e6f7 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -49,6 +49,7 @@ const ( AccessListTxType = 0x01 DynamicFeeTxType = 0x02 BlobTxType = 0x03 + Rip7560Type = 0x04 ) // Transaction is an Ethereum transaction. @@ -466,6 +467,12 @@ func (tx *Transaction) WithBlobTxSidecar(sideCar *BlobTxSidecar) *Transaction { return cpy } +func (tx *Transaction) Rip7560TransactionData() *Rip7560AccountAbstractionTx { + inner := tx.inner + ptr := inner.(*Rip7560AccountAbstractionTx) + return ptr +} + // SetTime sets the decoding time of a transaction. This is used by tests to set // arbitrary times and by persistent transaction pools when loading old txs from // disk. diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go new file mode 100644 index 000000000000..b63de28dc94f --- /dev/null +++ b/core/types/tx_rip7560.go @@ -0,0 +1,221 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "bytes" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" + "math/big" +) + +const ScaTransactionSubtype = 0x01 + +// Rip7560AccountAbstractionTx represents an RIP-7560 transaction. 
+type Rip7560AccountAbstractionTx struct { + Subtype byte + // overlapping fields + ChainID *big.Int + GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas + GasFeeCap *big.Int // a.k.a. maxFeePerGas + Gas uint64 + Data []byte + AccessList AccessList + + // extra fields + Sender *common.Address + Signature []byte + PaymasterData []byte + DeployerData []byte + BuilderFee *big.Int + ValidationGas uint64 + PaymasterGas uint64 + PostOpGas uint64 + BigNonce *big.Int + + // removed fields + To *common.Address + Nonce uint64 + Value *big.Int +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *Rip7560AccountAbstractionTx) copy() TxData { + cpy := &Rip7560AccountAbstractionTx{ + Subtype: tx.Subtype, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. + AccessList: make(AccessList, len(tx.AccessList)), + Value: new(big.Int), + ChainID: new(big.Int), + GasTipCap: new(big.Int), + GasFeeCap: new(big.Int), + + BigNonce: new(big.Int), + Sender: copyAddressPtr(tx.Sender), + Signature: common.CopyBytes(tx.Signature), + PaymasterData: common.CopyBytes(tx.PaymasterData), + DeployerData: common.CopyBytes(tx.DeployerData), + BuilderFee: new(big.Int), + ValidationGas: tx.ValidationGas, + PaymasterGas: tx.PaymasterGas, + } + copy(cpy.AccessList, tx.AccessList) + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasTipCap != nil { + cpy.GasTipCap.Set(tx.GasTipCap) + } + if tx.GasFeeCap != nil { + cpy.GasFeeCap.Set(tx.GasFeeCap) + } + if tx.BigNonce != nil { + cpy.BigNonce.Set(tx.BigNonce) + } + if tx.BuilderFee != nil { + cpy.BuilderFee.Set(tx.BuilderFee) + } + return cpy +} + +// accessors for innerTx. 
+func (tx *Rip7560AccountAbstractionTx) txType() byte { return Rip7560Type } +func (tx *Rip7560AccountAbstractionTx) chainID() *big.Int { return tx.ChainID } +func (tx *Rip7560AccountAbstractionTx) accessList() AccessList { return tx.AccessList } +func (tx *Rip7560AccountAbstractionTx) data() []byte { return tx.Data } +func (tx *Rip7560AccountAbstractionTx) gas() uint64 { return tx.Gas } +func (tx *Rip7560AccountAbstractionTx) gasFeeCap() *big.Int { return tx.GasFeeCap } +func (tx *Rip7560AccountAbstractionTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *Rip7560AccountAbstractionTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return tx.Value } +func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return 0 } +func (tx *Rip7560AccountAbstractionTx) bigNonce() *big.Int { return tx.BigNonce } +func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return tx.To } + +func (tx *Rip7560AccountAbstractionTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + if baseFee == nil { + return dst.Set(tx.GasFeeCap) + } + tip := dst.Sub(tx.GasFeeCap, baseFee) + if tip.Cmp(tx.GasTipCap) > 0 { + tip.Set(tx.GasTipCap) + } + return tip.Add(tip, baseFee) +} + +func (tx *Rip7560AccountAbstractionTx) rawSignatureValues() (v, r, s *big.Int) { + return new(big.Int), new(big.Int), new(big.Int) +} + +func (tx *Rip7560AccountAbstractionTx) setSignatureValues(chainID, v, r, s *big.Int) { + //tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s +} + +// encode the subtype byte and the payload-bearing bytes of the RIP-7560 transaction +func (tx *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { + b.WriteByte(ScaTransactionSubtype) + return rlp.Encode(b, tx) +} + +// decode the payload-bearing bytes of the encoded RIP-7560 transaction payload +func (tx *Rip7560AccountAbstractionTx) decode(input []byte) error { + tx.Subtype = ScaTransactionSubtype + return rlp.DecodeBytes(input[1:], tx) +} + +// 
Rip7560Transaction an equivalent of a solidity struct only used to encode the 'transaction' parameter +type Rip7560Transaction struct { + Sender common.Address + Nonce *big.Int + ValidationGasLimit *big.Int + PaymasterGasLimit *big.Int + CallGasLimit *big.Int + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int + BuilderFee *big.Int + PaymasterData []byte + DeployerData []byte + CallData []byte + Signature []byte +} + +func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { + structThing, _ := abi.NewType("tuple", "struct thing", []abi.ArgumentMarshaling{ + {Name: "sender", Type: "address"}, + {Name: "nonce", Type: "uint256"}, + {Name: "validationGasLimit", Type: "uint256"}, + {Name: "paymasterGasLimit", Type: "uint256"}, + {Name: "callGasLimit", Type: "uint256"}, + {Name: "maxFeePerGas", Type: "uint256"}, + {Name: "maxPriorityFeePerGas", Type: "uint256"}, + {Name: "builderFee", Type: "uint256"}, + {Name: "paymasterData", Type: "bytes"}, + {Name: "deployerData", Type: "bytes"}, + {Name: "callData", Type: "bytes"}, + {Name: "signature", Type: "bytes"}, + }) + + args := abi.Arguments{ + {Type: structThing, Name: "param_one"}, + } + record := &Rip7560Transaction{ + Sender: *tx.Sender, + Nonce: tx.BigNonce, + ValidationGasLimit: big.NewInt(int64(tx.ValidationGas)), + PaymasterGasLimit: big.NewInt(int64(tx.PaymasterGas)), + CallGasLimit: big.NewInt(int64(tx.Gas)), + MaxFeePerGas: tx.GasFeeCap, + MaxPriorityFeePerGas: tx.GasTipCap, + BuilderFee: tx.BuilderFee, + PaymasterData: tx.PaymasterData, + DeployerData: tx.DeployerData, + CallData: tx.Data, + Signature: tx.Signature, + } + packed, err := args.Pack(&record) + return packed, err +} + +// ExternallyReceivedBundle represents a bundle of Type 4 transactions received from a trusted 3rd party. +// The validator includes the bundle in the original order atomically or drops it completely. 
+type ExternallyReceivedBundle struct { + BundlerId string + BundleHash common.Hash + ExpectedRevenue *big.Int + ValidForBlock *big.Int + Transactions []*Transaction +} + +// BundleReceipt represents a receipt for an ExternallyReceivedBundle successfully included in a block. +type BundleReceipt struct { + BundleHash common.Hash + Count uint64 + Status uint64 // 0=included / 1=pending / 2=invalid / 3=unknown + BlockNumber uint64 + BlockHash common.Hash + TransactionReceipts []*Receipt + GasUsed uint64 + GasPaidPriority *big.Int + BlockTimestamp uint64 +} diff --git a/eth/api_backend_rip7560.go b/eth/api_backend_rip7560.go new file mode 100644 index 000000000000..4a8ad2f36e6c --- /dev/null +++ b/eth/api_backend_rip7560.go @@ -0,0 +1,15 @@ +package eth + +import ( + "context" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +func (b *EthAPIBackend) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error { + return b.eth.txPool.SubmitRip7560Bundle(bundle) +} + +func (b *EthAPIBackend) GetRip7560BundleStatus(ctx context.Context, hash common.Hash) (*types.BundleReceipt, error) { + return b.eth.txPool.GetRip7560BundleStatus(hash) +} diff --git a/eth/backend.go b/eth/backend.go index e616b5f2f195..0c2f829e53a8 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/ethereum/go-ethereum/core/txpool/rip7560pool" "math/big" "runtime" "sync" @@ -240,7 +241,13 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } legacyPool := legacypool.New(config.TxPool, eth.blockchain) - eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + rip7560PoolConfig := rip7560pool.Config{ + MaxBundleGas: 10000000, + MaxBundleSize: 100, + } + rip7560 := rip7560pool.New(rip7560PoolConfig, eth.blockchain, config.Miner.Etherbase) + + eth.txPool, err = txpool.New(config.TxPool.PriceLimit, 
eth.blockchain, []txpool.SubPool{legacyPool, blobPool, rip7560}) if err != nil { return nil, err } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 2a45ba09210f..35c4a9041975 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -97,6 +97,10 @@ type Backend interface { SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription BloomStatus() (uint64, uint64) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) + + // RIP-7560 specific functions + SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error + GetRip7560BundleStatus(ctx context.Context, hash common.Hash) (*types.BundleReceipt, error) } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/internal/ethapi/rip7560api.go b/internal/ethapi/rip7560api.go new file mode 100644 index 000000000000..0245b3207791 --- /dev/null +++ b/internal/ethapi/rip7560api.go @@ -0,0 +1,62 @@ +package ethapi + +import ( + "context" + "errors" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" + "math/big" +) + +func (s *TransactionAPI) SendRip7560TransactionsBundle(ctx context.Context, args []TransactionArgs, creationBlock *big.Int, expectedRevenue *big.Int, bundlerId string) (common.Hash, error) { + if len(args) == 0 { + return common.Hash{}, errors.New("submitted bundle has zero length") + } + txs := make([]*types.Transaction, len(args)) + for i := 0; i < len(args); i++ { + txs[i] = args[i].ToTransaction() + } + bundle := &types.ExternallyReceivedBundle{ + BundlerId: bundlerId, + ExpectedRevenue: expectedRevenue, + ValidForBlock: creationBlock, + Transactions: txs, + } + bundleHash := calculateBundleHash(txs) + bundle.BundleHash = bundleHash + err := SubmitRip7560Bundle(ctx, s.b, bundle) + if err != nil { + return common.Hash{}, err + } + return bundleHash, nil +} + +func (s *TransactionAPI) GetRip7560BundleStatus(ctx context.Context, hash 
common.Hash) (*types.BundleReceipt, error) { + bundleStats, err := s.b.GetRip7560BundleStatus(ctx, hash) + return bundleStats, err +} + +// TODO: If this code is indeed necessary, keep it in utils; better - remove altogether. +func calculateBundleHash(txs []*types.Transaction) common.Hash { + appendedTxIds := make([]byte, 0) + for _, tx := range txs { + txHash := tx.Hash() + appendedTxIds = append(appendedTxIds, txHash[:]...) + } + + return rlpHash(appendedTxIds) +} + +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) + hw.Sum(h[:0]) + return h +} + +// SubmitRip7560Bundle is a helper function that submits a bundle of Type 4 transactions to txPool and logs a message. +func SubmitRip7560Bundle(ctx context.Context, b Backend, bundle *types.ExternallyReceivedBundle) error { + return b.SubmitRip7560Bundle(bundle) +} diff --git a/miner/worker.go b/miner/worker.go index 5dc3e2056b81..c52a9c7eb092 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -373,6 +373,22 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran return nil } +func (miner *Miner) commitRip7560TransactionsBundle(env *environment, txs *types.ExternallyReceivedBundle, _ *atomic.Int32) error { + + // todo: copied over to fix crash, probably should do it once + gasLimit := env.header.GasLimit + if env.gasPool == nil { + env.gasPool = new(core.GasPool).AddGas(gasLimit) + } + + validatedTxs, receipts, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}) + + env.txs = append(env.txs, validatedTxs...) + env.receipts = append(env.receipts, receipts...) + env.tcount += len(validatedTxs) + return err +} + // fillTransactions retrieves the pending transactions from the txpool and fills them // into the given sealing block. The transaction selection and ordering strategy can // be customized with the plugin in the future. 
@@ -411,6 +427,14 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) localBlobTxs[account] = txs } } + + pendingBundle, err := miner.txpool.PendingRip7560Bundle() + if pendingBundle != nil { + if err = miner.commitRip7560TransactionsBundle(env, pendingBundle, interrupt); err != nil { + return err + } + } + // Fill the block with all available pending transactions. if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 { plainTxs := newTransactionsByPriceAndNonce(env.signer, localPlainTxs, env.header.BaseFee) diff --git a/params/config.go b/params/config.go index 486cb6d132b1..9d9fc242bc3b 100644 --- a/params/config.go +++ b/params/config.go @@ -368,6 +368,11 @@ type ChainConfig struct { // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` + + // RIP-7560 specific config parameters + EntryPointAddress common.Address `json:"entryPointAddress,omitempty"` + NonceManagerAddress common.Address `json:"nonceManagerAddress,omitempty"` + DeployerCallerAddress common.Address `json:"deployerCallerAddress,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. 
From 034f2653e0a9c7fd1bdd9e2da380eb2663fb65b4 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 13:30:41 +0200 Subject: [PATCH 02/73] TMP: check in commands to run the validator and beacon chain node --- alexfdata/commands/1-prysmctl.sh | 5 ++ alexfdata/commands/2-geth.sh | 11 +++ alexfdata/commands/3-beacon.sh | 5 ++ alexfdata/commands/4-validator.sh | 5 ++ alexfdata/config.yml | 28 ++++++++ alexfdata/genesis.json | 112 ++++++++++++++++++++++++++++++ alexfdata/jwt.hex | 1 + alexfdata/pwd | 0 alexfdata/secret.txt | 2 + 9 files changed, 169 insertions(+) create mode 100755 alexfdata/commands/1-prysmctl.sh create mode 100755 alexfdata/commands/2-geth.sh create mode 100755 alexfdata/commands/3-beacon.sh create mode 100755 alexfdata/commands/4-validator.sh create mode 100644 alexfdata/config.yml create mode 100644 alexfdata/genesis.json create mode 100644 alexfdata/jwt.hex create mode 100644 alexfdata/pwd create mode 100644 alexfdata/secret.txt diff --git a/alexfdata/commands/1-prysmctl.sh b/alexfdata/commands/1-prysmctl.sh new file mode 100755 index 000000000000..652b1df156d1 --- /dev/null +++ b/alexfdata/commands/1-prysmctl.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +rm ../genesis.ssz + +../prysmctl testnet generate-genesis --fork capella --num-validators 64 --genesis-time-delay 600 --chain-config-file ../config.yml --geth-genesis-json-in ../genesis.json --geth-genesis-json-out ../genesis.json --output-ssz ../genesis.ssz diff --git a/alexfdata/commands/2-geth.sh b/alexfdata/commands/2-geth.sh new file mode 100755 index 000000000000..417808c08995 --- /dev/null +++ b/alexfdata/commands/2-geth.sh @@ -0,0 +1,11 @@ +#!/bin/sh -e + +rm -rf ../gethdata + +../../build/bin/geth --datadir=../gethdata --password=../pwd account import ../secret.txt + +../../build/bin/geth --datadir=../gethdata init ../genesis.json + +echo "STARTING GETH" + +../../build/bin/geth --http --http.port 8545 --http.api eth,net,web3 --ws --ws.api eth,net,web3 --authrpc.jwtsecret ../jwt.hex 
--datadir ../gethdata --nodiscover --syncmode full --allow-insecure-unlock --unlock 0x123463a4b065722e99115d6c222f267d9cabb524 --password ../pwd diff --git a/alexfdata/commands/3-beacon.sh b/alexfdata/commands/3-beacon.sh new file mode 100755 index 000000000000..da22de713756 --- /dev/null +++ b/alexfdata/commands/3-beacon.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +rm -rf ../beacondata + +../beacon_chain --datadir ../beacondata --min-sync-peers 0 --genesis-state ../genesis.ssz --bootstrap-node= --interop-eth1data-votes --chain-config-file ../config.yml --contract-deployment-block 0 --chain-id 32382 --accept-terms-of-use --jwt-secret ../jwt.hex --suggested-fee-recipient 0x123463a4B065722E99115D6c222f267d9cABb524 --minimum-peers-per-subnet 0 --enable-debug-rpc-endpoints --execution-endpoint ../gethdata/geth.ipc diff --git a/alexfdata/commands/4-validator.sh b/alexfdata/commands/4-validator.sh new file mode 100755 index 000000000000..5a35db216459 --- /dev/null +++ b/alexfdata/commands/4-validator.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +rm -rf ../validatordata + +../validator --datadir ../validatordata --accept-terms-of-use --interop-num-validators 64 --chain-config-file ../config.yml diff --git a/alexfdata/config.yml b/alexfdata/config.yml new file mode 100644 index 000000000000..372cd8fb18cd --- /dev/null +++ b/alexfdata/config.yml @@ -0,0 +1,28 @@ +CONFIG_NAME: interop +PRESET_BASE: interop + +# Genesis +GENESIS_FORK_VERSION: 0x20000089 + +# Altair +ALTAIR_FORK_EPOCH: 0 +ALTAIR_FORK_VERSION: 0x20000090 + +# Merge +BELLATRIX_FORK_EPOCH: 0 +BELLATRIX_FORK_VERSION: 0x20000091 +TERMINAL_TOTAL_DIFFICULTY: 0 + +# Capella +CAPELLA_FORK_EPOCH: 0 +CAPELLA_FORK_VERSION: 0x20000092 +MAX_WITHDRAWALS_PER_PAYLOAD: 16 + +DENEB_FORK_VERSION: 0x20000093 + +# Time parameters +SECONDS_PER_SLOT: 12 +SLOTS_PER_EPOCH: 6 + +# Deposit contract +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 diff --git a/alexfdata/genesis.json b/alexfdata/genesis.json new file mode 100644 index 
000000000000..cfa44c4d89dc --- /dev/null +++ b/alexfdata/genesis.json @@ -0,0 +1,112 @@ +{ + "config": { + "chainId": 32382, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "shanghaiTime": 1714475887, + "cancunTime": 1733884783, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "nonce": "0x0", + "timestamp": "0x6630d36f", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000123463a4b065722e99115d6c222f267d9cabb5240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x1c9c380", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "123463a4b065722e99115d6c222f267d9cabb524": { + "balance": "0x43c33c1937564800000" + }, + "14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0x21e19e0c9bab2400000" + }, + "15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0x21e19e0c9bab2400000" + }, + "1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0x21e19e0c9bab2400000" + }, + "23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0x21e19e0c9bab2400000" + }, + "2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0x21e19e0c9bab2400000" + }, + "3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": "0x21e19e0c9bab2400000" + }, + "4242424242424242424242424242424242424242": { + "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100b6578063621fd130146101e3578063c5f2892f14610273575b600080fd5b34801561005057600080fd5b5061009c6004803603602081101561006757600080fd5b8101908080357bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916906020019092919050505061029e565b604051808215151515815260200191505060405180910390f35b6101e1600480360360808110156100cc57600080fd5b81019080803590602001906401000000008111156100e957600080fd5b8201836020820111156100fb57600080fd5b8035906020019184600183028401116401000000008311171561011d57600080fd5b90919293919293908035906020019064010000000081111561013e57600080fd5b82018360208201111561015057600080fd5b8035906020019184600183028401116401000000008311171561017257600080fd5b90919293919293908035906020019064010000000081111561019357600080fd5b8201836020820111156101a557600080fd5b803590602001918460018302840111640100000000831117156101c757600080fd5b909192939192939080359060200190929190505050610370565b005b3480156101ef57600080fd5b506101f8610fd0565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561023857808201518184015260208101905061021d565b50505050905090810190601f1680156102655780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561027f57600080fd5b50610288610fe2565b6040518082815260200191505060405180910390f35b60007f01ffc9a7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916148061036957507f85640907000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b9050919050565b603087879050146103cc576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806116ec6026913960400191505060405180910390fd5b60208585905014610428576040517f08c379a00000000000000
000000000000000000000000000000000000000000081526004018080602001828103825260368152602001806116836036913960400191505060405180910390fd5b60608383905014610484576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602981526020018061175f6029913960400191505060405180910390fd5b670de0b6b3a76400003410156104e5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806117396026913960400191505060405180910390fd5b6000633b9aca0034816104f457fe5b061461054b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806116b96033913960400191505060405180910390fd5b6000633b9aca00348161055a57fe5b04905067ffffffffffffffff80168111156105c0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260278152602001806117126027913960400191505060405180910390fd5b60606105cb82611314565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a610600602054611314565b60405180806020018060200180602001806020018060200186810386528e8e82818152602001925080828437600081840152601f19601f82011690508083019250505086810385528c8c82818152602001925080828437600081840152601f19601f82011690508083019250505086810384528a818151815260200191508051906020019080838360005b838110156106a657808201518184015260208101905061068b565b50505050905090810190601f1680156106d35780820380516001836020036101000a031916815260200191505b508681038352898982818152602001925080828437600081840152601f19601f820116905080830192505050868103825287818151815260200191508051906020019080838360005b8381101561073757808201518184015260208101905061071c565b50505050905090810190601f1680156107645780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b6040516020018084848082843780830192505050826fffffffffffffffffffffffffffffffff19166fffffffffffffffffffffffffffffffff191681526010019350505050604
0516020818303038152906040526040518082805190602001908083835b6020831061080e57805182526020820191506020810190506020830392506107eb565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610850573d6000803e3d6000fd5b5050506040513d602081101561086557600080fd5b8101908080519060200190929190505050905060006002808888600090604092610891939291906115da565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108eb57805182526020820191506020810190506020830392506108c8565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa15801561092d573d6000803e3d6000fd5b5050506040513d602081101561094257600080fd5b8101908080519060200190929190505050600289896040908092610968939291906115da565b6000801b604051602001808484808284378083019250505082815260200193505050506040516020818303038152906040526040518082805190602001908083835b602083106109cd57805182526020820191506020810190506020830392506109aa565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610a0f573d6000803e3d6000fd5b5050506040513d6020811015610a2457600080fd5b810190808051906020019092919050505060405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b60208310610a8e5780518252602082019150602081019050602083039250610a6b565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610ad0573d6000803e3d6000fd5b5050506040513d6020811015610ae557600080fd5b810190808051906020019092919050505090506000600280848c8c604051602001808481526020018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610b615780518252602082019150602081019050602083039250610b3e565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610ba3573d6000803e3d6000fd5b5050506040513d6020811015610bb85760008
0fd5b8101908080519060200190929190505050600286600060401b866040516020018084805190602001908083835b60208310610c085780518252602082019150602081019050602083039250610be5565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610c935780518252602082019150602081019050602083039250610c70565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610cd5573d6000803e3d6000fd5b5050506040513d6020811015610cea57600080fd5b810190808051906020019092919050505060405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b60208310610d545780518252602082019150602081019050602083039250610d31565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610d96573d6000803e3d6000fd5b5050506040513d6020811015610dab57600080fd5b81019080805190602001909291905050509050858114610e16576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252605481526020018061162f6054913960600191505060405180910390fd5b6001602060020a0360205410610e77576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602181526020018061160e6021913960400191505060405180910390fd5b60016020600082825401925050819055506000602054905060008090505b6020811015610fb75760018083161415610ec8578260008260208110610eb757fe5b018190555050505050505050610fc7565b600260008260208110610ed757fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b60208310610f335780518252602082019150602081019050602083039250610f10565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa158015610f75573d6000803e3d6000fd5b5050506040513d6020811015610f8a57600080fd5b810190808051906020019092919050505092506
0028281610fa757fe5b0491508080600101915050610e95565b506000610fc057fe5b5050505050505b50505050505050565b6060610fdd602054611314565b905090565b6000806000602054905060008090505b60208110156111d057600180831614156110e05760026000826020811061101557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b60208310611071578051825260208201915060208101905060208303925061104e565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa1580156110b3573d6000803e3d6000fd5b5050506040513d60208110156110c857600080fd5b810190808051906020019092919050505092506111b6565b600283602183602081106110f057fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061114b5780518252602082019150602081019050602083039250611128565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa15801561118d573d6000803e3d6000fd5b5050506040513d60208110156111a257600080fd5b810190808051906020019092919050505092505b600282816111c057fe5b0491508080600101915050610ff2565b506002826111df602054611314565b600060401b6040516020018084815260200183805190602001908083835b6020831061122057805182526020820191506020810190506020830392506111fd565b6001836020036101000a0380198251168184511680821785525050505050509050018267ffffffffffffffff191667ffffffffffffffff1916815260180193505050506040516020818303038152906040526040518082805190602001908083835b602083106112a55780518252602082019150602081019050602083039250611282565b6001836020036101000a038019825116818451168082178552505050505050905001915050602060405180830381855afa1580156112e7573d6000803e3d6000fd5b5050506040513d60208110156112fc57600080fd5b81019080805190602001909291905050509250505090565b6060600867ffffffffffffffff8111801561132e57600080fd5b506040519080825280601f01601f1916602001820160405280156113615781602001600182028036833780820191505090505b50905060008260c01b90508060076008811061137957fe5b1a60f81b82600081518110611
38a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350806006600881106113c657fe5b1a60f81b826001815181106113d757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060056008811061141357fe5b1a60f81b8260028151811061142457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060046008811061146057fe5b1a60f81b8260038151811061147157fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350806003600881106114ad57fe5b1a60f81b826004815181106114be57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350806002600881106114fa57fe5b1a60f81b8260058151811061150b57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060016008811061154757fe5b1a60f81b8260068151811061155857fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060006008811061159457fe5b1a60f81b826007815181106115a557fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b600080858511156115ea57600080fd5b838611156115f757600080fd5b600185028301915084860390509450949250505056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743
a20696e76616c6964207369676e6174757265206c656e677468a2646970667358221220230afd4b6e3551329e50f1239e08fa3ab7907b77403c4f237d9adf679e8e43cf64736f6c634300060b0033", + "balance": "0x0" + }, + "4e59b44847b379578588920ca78fbf26c0b4956c": { + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "balance": "0x0" + }, + "5678e9e827b3be0e3d4b910126a64a697a148267": { + "balance": "0x43c33c1937564800000" + }, + "70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0x21e19e0c9bab2400000" + }, + "71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0x21e19e0c9bab2400000" + }, + "8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0x21e19e0c9bab2400000" + }, + "90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0x21e19e0c9bab2400000" + }, + "976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0x21e19e0c9bab2400000" + }, + "9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0x21e19e0c9bab2400000" + }, + "a0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0x21e19e0c9bab2400000" + }, + "bcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0x21e19e0c9bab2400000" + }, + "bda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0x21e19e0c9bab2400000" + }, + "cd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0x21e19e0c9bab2400000" + }, + "dd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0x21e19e0c9bab2400000" + }, + "df3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0x21e19e0c9bab2400000" + }, + "f39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0x21e19e0c9bab2400000" + }, + "fabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0x21e19e0c9bab2400000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null, + "excessBlobGas": null, + "blobGasUsed": null +} \ No newline at end of file diff 
--git a/alexfdata/jwt.hex b/alexfdata/jwt.hex new file mode 100644 index 000000000000..c4454fe4fa34 --- /dev/null +++ b/alexfdata/jwt.hex @@ -0,0 +1 @@ +0xfad2709d0bb03bf0e8ba3c99bea194575d3e98863133d1af638ed056d1d59345 \ No newline at end of file diff --git a/alexfdata/pwd b/alexfdata/pwd new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/alexfdata/secret.txt b/alexfdata/secret.txt new file mode 100644 index 000000000000..e4378ae4a21e --- /dev/null +++ b/alexfdata/secret.txt @@ -0,0 +1,2 @@ +2e0834786285daccd064ca17f1654f67b4aef298acbb82cef9ec422fb4975622 + From ba5ba437c7172fa09fd419b248cbce5e1cc3a622 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 13:33:46 +0200 Subject: [PATCH 03/73] TMP: check in ignored files --- .gitignore | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.gitignore b/.gitignore index 3f27cdc00f07..0e22b69849fa 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,10 @@ profile.cov logs/ tests/spec-tests/ +/alexfdata/gethdata/ +/alexfdata/beacondata/ +/alexfdata/validatordata/ +/alexfdata/beacon_chain +/alexfdata/prysmctl +/alexfdata/validator +/alexfdata/genesis.ssz From f517bf70ba25475eeffb782b63b15d11abc93b2f Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 13:55:19 +0200 Subject: [PATCH 04/73] Create a separate 'cleanup.sh' command --- alexfdata/commands/0-cleanup.sh | 9 +++++++++ alexfdata/commands/1-prysmctl.sh | 4 +--- alexfdata/commands/2-geth.sh | 2 -- alexfdata/commands/3-beacon.sh | 4 +--- alexfdata/commands/4-validator.sh | 1 - alexfdata/genesis.json | 2 +- 6 files changed, 12 insertions(+), 10 deletions(-) create mode 100755 alexfdata/commands/0-cleanup.sh diff --git a/alexfdata/commands/0-cleanup.sh b/alexfdata/commands/0-cleanup.sh new file mode 100755 index 000000000000..c189d05561f0 --- /dev/null +++ b/alexfdata/commands/0-cleanup.sh @@ -0,0 +1,9 @@ +#!/bin/sh -e + +rm ../genesis.ssz + +rm -rf ../gethdata + +rm -rf ../beacondata + +rm -rf ../validatordata diff 
--git a/alexfdata/commands/1-prysmctl.sh b/alexfdata/commands/1-prysmctl.sh index 652b1df156d1..77abd30fb90f 100755 --- a/alexfdata/commands/1-prysmctl.sh +++ b/alexfdata/commands/1-prysmctl.sh @@ -1,5 +1,3 @@ #!/bin/sh -e -rm ../genesis.ssz - -../prysmctl testnet generate-genesis --fork capella --num-validators 64 --genesis-time-delay 600 --chain-config-file ../config.yml --geth-genesis-json-in ../genesis.json --geth-genesis-json-out ../genesis.json --output-ssz ../genesis.ssz +../prysmctl testnet generate-genesis --fork capella --num-validators 64 --genesis-time-delay 15 --chain-config-file ../config.yml --geth-genesis-json-in ../genesis.json --geth-genesis-json-out ../genesis.json --output-ssz ../genesis.ssz diff --git a/alexfdata/commands/2-geth.sh b/alexfdata/commands/2-geth.sh index 417808c08995..6c080e406f64 100755 --- a/alexfdata/commands/2-geth.sh +++ b/alexfdata/commands/2-geth.sh @@ -1,7 +1,5 @@ #!/bin/sh -e -rm -rf ../gethdata - ../../build/bin/geth --datadir=../gethdata --password=../pwd account import ../secret.txt ../../build/bin/geth --datadir=../gethdata init ../genesis.json diff --git a/alexfdata/commands/3-beacon.sh b/alexfdata/commands/3-beacon.sh index da22de713756..e6888b06957c 100755 --- a/alexfdata/commands/3-beacon.sh +++ b/alexfdata/commands/3-beacon.sh @@ -1,5 +1,3 @@ #!/bin/sh -e -rm -rf ../beacondata - -../beacon_chain --datadir ../beacondata --min-sync-peers 0 --genesis-state ../genesis.ssz --bootstrap-node= --interop-eth1data-votes --chain-config-file ../config.yml --contract-deployment-block 0 --chain-id 32382 --accept-terms-of-use --jwt-secret ../jwt.hex --suggested-fee-recipient 0x123463a4B065722E99115D6c222f267d9cABb524 --minimum-peers-per-subnet 0 --enable-debug-rpc-endpoints --execution-endpoint ../gethdata/geth.ipc +../beacon_chain --datadir ../beacondata --min-sync-peers 0 --genesis-state ../genesis.ssz --bootstrap-node= --interop-eth1data-votes --chain-config-file ../config.yml --contract-deployment-block 0 --chain-id 1337 
--accept-terms-of-use --jwt-secret ../jwt.hex --suggested-fee-recipient 0x123463a4B065722E99115D6c222f267d9cABb524 --minimum-peers-per-subnet 0 --enable-debug-rpc-endpoints --execution-endpoint ../gethdata/geth.ipc diff --git a/alexfdata/commands/4-validator.sh b/alexfdata/commands/4-validator.sh index 5a35db216459..2deecd3f09dd 100755 --- a/alexfdata/commands/4-validator.sh +++ b/alexfdata/commands/4-validator.sh @@ -1,5 +1,4 @@ #!/bin/sh -e -rm -rf ../validatordata ../validator --datadir ../validatordata --accept-terms-of-use --interop-num-validators 64 --chain-config-file ../config.yml diff --git a/alexfdata/genesis.json b/alexfdata/genesis.json index cfa44c4d89dc..7af9fa084118 100644 --- a/alexfdata/genesis.json +++ b/alexfdata/genesis.json @@ -1,6 +1,6 @@ { "config": { - "chainId": 32382, + "chainId": 1337, "homesteadBlock": 0, "daoForkSupport": true, "eip150Block": 0, From bcca728d6270c22eae33b65df633727bd65c7995 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 14:10:36 +0200 Subject: [PATCH 05/73] Set slot time to 3 seconds; check in "--dev" mode idea command --- .run/go build geth.run.xml | 13 +++++++++++++ alexfdata/config.yml | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 .run/go build geth.run.xml diff --git a/.run/go build geth.run.xml b/.run/go build geth.run.xml new file mode 100644 index 000000000000..e2bd1f6dde94 --- /dev/null +++ b/.run/go build geth.run.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/alexfdata/config.yml b/alexfdata/config.yml index 372cd8fb18cd..45915e355a9e 100644 --- a/alexfdata/config.yml +++ b/alexfdata/config.yml @@ -21,7 +21,7 @@ MAX_WITHDRAWALS_PER_PAYLOAD: 16 DENEB_FORK_VERSION: 0x20000093 # Time parameters -SECONDS_PER_SLOT: 12 +SECONDS_PER_SLOT: 3 SLOTS_PER_EPOCH: 6 # Deposit contract From 75fd756a258e015bf49234c715ee0e29b6283d95 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 15:14:21 +0200 Subject: [PATCH 06/73] 
Remove code related to EOA-based RIP-7560 transactions --- core/state_processor_rip7560.go | 114 ++++------------------ core/types/transaction.go | 2 + core/types/transaction_signing.go | 2 + core/types/transaction_signing_rip7560.go | 47 +++++++++ internal/ethapi/transaction_args.go | 39 ++++++++ params/config.go | 8 ++ 6 files changed, 116 insertions(+), 96 deletions(-) create mode 100644 core/types/transaction_signing_rip7560.go diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index c0c3d9f7ae7a..b5d55ef716a1 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -27,7 +27,6 @@ type ValidationPhaseResult struct { SenderValidUntil uint64 PmValidAfter uint64 PmValidUntil uint64 - IsEOA bool } // HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions and return @@ -38,22 +37,13 @@ func HandleRip7560Transactions(transactions []*types.Transaction, index int, sta receipts := make([]*types.Receipt, 0) allLogs := make([]*types.Log, 0) - i := index - for { - if i >= len(transactions) { - break - } - if transactions[i].Type() != types.Rip7560Type { - break - } - iTransactions, iReceipts, iLogs, err := handleRip7560Transactions(transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg) - if err != nil { - return nil, nil, nil, err - } - validatedTransactions = append(validatedTransactions, iTransactions...) - receipts = append(receipts, iReceipts...) - allLogs = append(allLogs, iLogs...) + iTransactions, iReceipts, iLogs, err := handleRip7560Transactions(transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg) + if err != nil { + return nil, nil, nil, err } + validatedTransactions = append(validatedTransactions, iTransactions...) + receipts = append(receipts, iReceipts...) + allLogs = append(allLogs, iLogs...) 
return validatedTransactions, receipts, allLogs, nil } @@ -62,47 +52,31 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta validatedTransactions := make([]*types.Transaction, 0) receipts := make([]*types.Receipt, 0) allLogs := make([]*types.Log, 0) - signer := types.MakeSigner(chainConfig, header.Number, header.Time) for i, tx := range transactions[index:] { if tx.Type() != types.Rip7560Type { break } aatx := tx.Rip7560TransactionData() - isEoa, err := isTransactionEOA(tx, statedb, signer) + statedb.SetTxContext(tx.Hash(), index+i) + err := BuyGasRip7560Transaction(aatx, statedb) + var vpr *ValidationPhaseResult if err != nil { return nil, nil, nil, err } - statedb.SetTxContext(tx.Hash(), index+i) - err = BuyGasRip7560Transaction(aatx, statedb) - var vpr *ValidationPhaseResult - if isEoa { - blockContext := NewEVMBlockContext(header, bc, coinbase) - tmpMsg, err := TransactionToMessage(tx, signer, header.BaseFee) - txContext := NewEVMTxContext(tmpMsg) - evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) - //signer := types.MakeSigner(chainConfig, header.Number, header.Time) - signingHash := signer.Hash(tx) - vpr, err = validateRip7560TransactionFromEOA(tx, signingHash, statedb, evm, coinbase, header, gp, chainConfig) - if err != nil { - return nil, nil, nil, err - } - } else { - if err != nil { - return nil, nil, nil, err - } - vpr, err = ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) - if err != nil { - return nil, nil, nil, err - } + vpr, err = ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) + if err != nil { + return nil, nil, nil, err } validationPhaseResults = append(validationPhaseResults, vpr) validatedTransactions = append(validatedTransactions, tx) - } - for i, vpr := range validationPhaseResults { + + // This is the line separating the Validation and Execution phases + // It should be separated to implement the mempool-friendly AA 
RIP (number not assigned yet) + // for i, vpr := range validationPhaseResults // TODO: this will miss all validation phase events - pass in 'vpr' - statedb.SetTxContext(vpr.Tx.Hash(), i) + // statedb.SetTxContext(vpr.Tx.Hash(), i) receipt, err := ApplyRip7560ExecutionPhase(chainConfig, vpr, bc, coinbase, gp, statedb, header, cfg) @@ -139,49 +113,6 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St return nil } -// TODO: not needed with subtype - only use to validate transaction, maybe -func isTransactionEOA(tx *types.Transaction, statedb *state.StateDB, signer types.Signer) (bool, error) { - aatx := tx.Rip7560TransactionData() - senderHasCode := statedb.GetCodeSize(*aatx.Sender) != 0 || len(aatx.DeployerData) != 0 - if senderHasCode { - return false, nil - } - address, err := signer.Sender(tx) - if err != nil { - return false, err - } - if address.Cmp(*tx.Rip7560TransactionData().Sender) != 0 { - return false, errors.New("recovered signature does not match the claimed EOA sender") - } - return true, nil -} - -func validateRip7560TransactionFromEOA(tx *types.Transaction, signingHash common.Hash, statedb *state.StateDB, evm *vm.EVM, coinbase *common.Address, header *types.Header, gp *GasPool, chainConfig *params.ChainConfig) (*ValidationPhaseResult, error) { - // TODO: paymaste is actually optional for eoa-type-4 -> check paymaster data len() - paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) - if err != nil { - return nil, err - } - err = validateValidityTimeRange(header.Time, pmValidAfter, pmValidUntil) - if err != nil { - return nil, err - } - vpr := &ValidationPhaseResult{ - Tx: tx, - TxHash: tx.Hash(), - PaymasterContext: paymasterContext, - DeploymentUsedGas: 0, - ValidationUsedGas: 0, - PmValidationUsedGas: pmValidationUsedGas, - SenderValidAfter: 0, - SenderValidUntil: 0, - PmValidAfter: pmValidAfter, - PmValidUntil: 
pmValidUntil, - IsEOA: true, - } - return vpr, nil -} - func ApplyRip7560FrameMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) { return NewRip7560StateTransition(evm, msg, gp).TransitionDb() } @@ -311,7 +242,6 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte SenderValidUntil: validUntil, PmValidAfter: pmValidAfter, PmValidUntil: pmValidUntil, - IsEOA: false, } return vpr, nil @@ -373,15 +303,7 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase txContext.Origin = *vpr.Tx.Rip7560TransactionData().Sender evm := vm.NewEVM(blockContext, txContext, statedb, config, cfg) - var accountExecutionMsg *Message - if vpr.IsEOA { - accountExecutionMsg, err = prepareEOATargetExecutionMessage(vpr.Tx) - if err != nil { - return nil, err - } - } else { - accountExecutionMsg = prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) - } + accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) executionResult, err := ApplyRip7560FrameMessage(evm, accountExecutionMsg, gp) if err != nil { return nil, err diff --git a/core/types/transaction.go b/core/types/transaction.go index e8b389d8e6f7..996fee8e76be 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -207,6 +207,8 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { inner = new(DynamicFeeTx) case BlobTxType: inner = new(BlobTx) + case Rip7560Type: + inner = new(Rip7560AccountAbstractionTx) default: return nil, ErrTxTypeNotSupported } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 6e5f6712f81b..eed9e35c99e4 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -40,6 +40,8 @@ type sigCache struct { func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer { var signer Signer switch { + case config.IsRIP7560(blockNumber): + signer = NewRIP7560Signer(config.ChainID) case 
config.IsCancun(blockNumber, blockTime): signer = NewCancunSigner(config.ChainID) case config.IsLondon(blockNumber): diff --git a/core/types/transaction_signing_rip7560.go b/core/types/transaction_signing_rip7560.go new file mode 100644 index 000000000000..32fd2c1681f8 --- /dev/null +++ b/core/types/transaction_signing_rip7560.go @@ -0,0 +1,47 @@ +package types + +import ( + "github.com/ethereum/go-ethereum/common" + "math/big" +) + +type rip7560Signer struct{ londonSigner } + +func NewRIP7560Signer(chainId *big.Int) Signer { + return rip7560Signer{londonSigner{eip2930Signer{NewEIP155Signer(chainId)}}} +} + +func (s rip7560Signer) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != Rip7560Type { + return s.londonSigner.Sender(tx) + } + return [20]byte{}, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. +func (s rip7560Signer) Hash(tx *Transaction) common.Hash { + if tx.Type() != Rip7560Type { + return s.londonSigner.Hash(tx) + } + aatx := tx.Rip7560TransactionData() + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.GasTipCap(), + tx.GasFeeCap(), + tx.Gas(), + //tx.To(), + tx.Data(), + tx.AccessList(), + + aatx.Sender, + aatx.PaymasterData, + aatx.DeployerData, + aatx.BuilderFee, + aatx.ValidationGas, + aatx.PaymasterGas, + aatx.BigNonce, + }) +} diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index f199f9d91253..e6b7cd13b923 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -74,6 +74,17 @@ type TransactionArgs struct { // This configures whether blobs are allowed to be passed. 
blobSidecarAllowed bool + + // Introduced by RIP-7560 Transaction + Subtype *hexutil.Uint64 + Sender *common.Address `json:"sender"` + Signature *hexutil.Bytes + PaymasterData *hexutil.Bytes `json:"paymasterData"` + DeployerData *hexutil.Bytes + BuilderFee *hexutil.Big + ValidationGas *hexutil.Uint64 + PaymasterGas *hexutil.Uint64 + BigNonce *hexutil.Big // AA nonce is 256 bits wide } // from retrieves the transaction sender address. @@ -472,6 +483,34 @@ func (args *TransactionArgs) ToMessage(baseFee *big.Int) *core.Message { func (args *TransactionArgs) ToTransaction() *types.Transaction { var data types.TxData switch { + case args.Sender != nil: + al := types.AccessList{} + if args.AccessList != nil { + al = *args.AccessList + } + aatx := types.Rip7560AccountAbstractionTx{ + Subtype: byte(*args.Subtype), + To: &common.Address{}, + ChainID: (*big.Int)(args.ChainID), + Gas: uint64(*args.Gas), + GasFeeCap: (*big.Int)(args.MaxFeePerGas), + GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), + Value: (*big.Int)(args.Value), + Data: args.data(), + AccessList: al, + // RIP-7560 parameters + Sender: args.Sender, + Signature: *args.Signature, + PaymasterData: *args.PaymasterData, + DeployerData: *args.DeployerData, + BuilderFee: (*big.Int)(args.BuilderFee), + ValidationGas: uint64(*args.ValidationGas), + PaymasterGas: uint64(*args.PaymasterGas), + BigNonce: (*big.Int)(args.BigNonce), + } + data = &aatx + hash := types.NewTx(data).Hash() + log.Error("RIP-7560 transaction created", "sender", aatx.Sender.Hex(), "hash", hash) case args.BlobHashes != nil: al := types.AccessList{} if args.AccessList != nil { diff --git a/params/config.go b/params/config.go index 9d9fc242bc3b..eeb666eb77ae 100644 --- a/params/config.go +++ b/params/config.go @@ -173,6 +173,7 @@ var ( EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), + RIP7560Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), @@ 
-336,6 +337,8 @@ type ChainConfig struct { EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block + RIP7560Block *big.Int `json:"rip7560block,omitempty"` // RIP7560 HF block + ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium) ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated) PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople) @@ -586,6 +589,11 @@ func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) } +// IsRIP7560 returns whether num is either equal to the RIP7560 fork block or greater. +func (c *ChainConfig) IsRIP7560(num *big.Int) bool { + return isBlockForked(c.RIP7560Block, num) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { From c6fc74b5f831fb76c696a1de02cccba7ca99d881 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 30 Apr 2024 17:10:46 +0200 Subject: [PATCH 07/73] Remove BigNonce and NonceManager logic as it has moved to separate RIP --- core/state_processor.go | 11 ++++ core/state_processor_rip7560.go | 75 +---------------------- core/types/transaction_signing_rip7560.go | 1 - core/types/tx_rip7560.go | 8 +-- internal/ethapi/transaction_args.go | 2 - params/config.go | 1 - 6 files changed, 15 insertions(+), 83 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index b1a8938f677a..abe6353f2c7f 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -82,6 +82,17 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg } // Iterate over and process the individual transactions for i, tx := range block.Transactions() { + if tx.Type() == types.Rip7560Type { + // HandleRip7560Transactions accepts a transaction array and in the future bundle handling will need this + tmpTxs := [1]*types.Transaction{tx} + _, validatedTxsReceipts, validateTxsLogs, err := HandleRip7560Transactions(tmpTxs[:], 0, statedb, &context.Coinbase, header, gp, p.config, p.bc, cfg) + receipts = append(receipts, validatedTxsReceipts...) + allLogs = append(allLogs, validateTxsLogs...) 
+ if err != nil { + return nil, nil, 0, err + } + continue + } msg, err := TransactionToMessage(tx, signer, header.BaseFee) if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index b5d55ef716a1..7cbbd8f1dcd9 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -128,71 +128,12 @@ func NewRip7560StateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTra } } -// GetRip7560AccountNonce reads the two-dimensional RIP-7560 nonce from the given blockchain state -func GetRip7560AccountNonce(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config, sender common.Address, nonceKey *big.Int) uint64 { - - // todo: this is a copy paste of 5 lines that need 8 parameters to run, wtf? - blockContext := NewEVMBlockContext(header, bc, author) - message, err := TransactionToMessage(tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) - txContext := NewEVMTxContext(message) - vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg) - vmenv.Reset(txContext, statedb) // TODO what does this 'reset' do? - - from := common.HexToAddress("0x0000000000000000000000000000000000000000") - // todo: read NM address from global config - nonceManager := common.HexToAddress("0xdebc121d1b09bc03ff57fa1f96514d04a1f0f59d") - fromBigNonceKey256, _ := uint256.FromBig(nonceKey) - key := make([]byte, 24) - fromBigNonceKey256.WriteToSlice(key) - nonceManagerData := make([]byte, 0) - nonceManagerData = append(nonceManagerData[:], sender.Bytes()...) - nonceManagerData = append(nonceManagerData[:], key...) 
- - nonceManagerMsg := &Message{ - From: from, - To: &nonceManager, - Value: big.NewInt(0), - GasLimit: 100000, - GasPrice: big.NewInt(875000000), - GasFeeCap: big.NewInt(875000000), - GasTipCap: big.NewInt(875000000), - Data: nonceManagerData, - AccessList: make(types.AccessList, 0), - SkipAccountChecks: true, - IsRip7560Frame: true, - } - resultNonceManager, err := ApplyRip7560FrameMessage(vmenv, nonceManagerMsg, gp) - if err != nil { - // todo: handle - return 777 - } - if resultNonceManager.Err != nil { - return 888 - } - if resultNonceManager.ReturnData == nil { - return 999 - } - return big.NewInt(0).SetBytes(resultNonceManager.ReturnData).Uint64() -} - func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { - /*** Nonce Manger Frame ***/ - nonceManagerMsg := prepareNonceManagerMessage(tx, chainConfig) - + stubMsg := prepareStubMessage(tx, chainConfig) blockContext := NewEVMBlockContext(header, bc, author) - txContext := NewEVMTxContext(nonceManagerMsg) + txContext := NewEVMTxContext(stubMsg) txContext.Origin = *tx.Rip7560TransactionData().Sender evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) - - resultNonceManager, err := ApplyRip7560FrameMessage(evm, nonceManagerMsg, gp) - if err != nil { - return nil, err - } - statedb.IntermediateRoot(true) - if resultNonceManager.Err != nil { - return nil, resultNonceManager.Err - } - /*** Deployer Frame ***/ deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 @@ -340,25 +281,15 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase } return receipt, err } - -func prepareNonceManagerMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig) *Message { +func prepareStubMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig) *Message { tx := 
baseTx.Rip7560TransactionData() - key := make([]byte, 32) - fromBig, _ := uint256.FromBig(tx.BigNonce) - fromBig.WriteToSlice(key) - - nonceManagerData := make([]byte, 0) - nonceManagerData = append(nonceManagerData[:], tx.Sender.Bytes()...) - nonceManagerData = append(nonceManagerData[:], key...) return &Message{ From: chainConfig.EntryPointAddress, - To: &chainConfig.NonceManagerAddress, Value: big.NewInt(0), GasLimit: 100000, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, - Data: nonceManagerData, AccessList: make(types.AccessList, 0), SkipAccountChecks: true, IsRip7560Frame: true, diff --git a/core/types/transaction_signing_rip7560.go b/core/types/transaction_signing_rip7560.go index 32fd2c1681f8..0a84b46e56ad 100644 --- a/core/types/transaction_signing_rip7560.go +++ b/core/types/transaction_signing_rip7560.go @@ -42,6 +42,5 @@ func (s rip7560Signer) Hash(tx *Transaction) common.Hash { aatx.BuilderFee, aatx.ValidationGas, aatx.PaymasterGas, - aatx.BigNonce, }) } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index b63de28dc94f..335b58683d75 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -46,7 +46,6 @@ type Rip7560AccountAbstractionTx struct { ValidationGas uint64 PaymasterGas uint64 PostOpGas uint64 - BigNonce *big.Int // removed fields To *common.Address @@ -68,7 +67,6 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { GasTipCap: new(big.Int), GasFeeCap: new(big.Int), - BigNonce: new(big.Int), Sender: copyAddressPtr(tx.Sender), Signature: common.CopyBytes(tx.Signature), PaymasterData: common.CopyBytes(tx.PaymasterData), @@ -90,9 +88,6 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { if tx.GasFeeCap != nil { cpy.GasFeeCap.Set(tx.GasFeeCap) } - if tx.BigNonce != nil { - cpy.BigNonce.Set(tx.BigNonce) - } if tx.BuilderFee != nil { cpy.BuilderFee.Set(tx.BuilderFee) } @@ -110,7 +105,6 @@ func (tx *Rip7560AccountAbstractionTx) gasTipCap() *big.Int { return tx.GasTi func (tx 
*Rip7560AccountAbstractionTx) gasPrice() *big.Int { return tx.GasFeeCap } func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return tx.Value } func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return 0 } -func (tx *Rip7560AccountAbstractionTx) bigNonce() *big.Int { return tx.BigNonce } func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return tx.To } func (tx *Rip7560AccountAbstractionTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { @@ -181,7 +175,7 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { } record := &Rip7560Transaction{ Sender: *tx.Sender, - Nonce: tx.BigNonce, + Nonce: big.NewInt(int64(tx.Nonce)), ValidationGasLimit: big.NewInt(int64(tx.ValidationGas)), PaymasterGasLimit: big.NewInt(int64(tx.PaymasterGas)), CallGasLimit: big.NewInt(int64(tx.Gas)), diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index e6b7cd13b923..323dcc1a5084 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -84,7 +84,6 @@ type TransactionArgs struct { BuilderFee *hexutil.Big ValidationGas *hexutil.Uint64 PaymasterGas *hexutil.Uint64 - BigNonce *hexutil.Big // AA nonce is 256 bits wide } // from retrieves the transaction sender address. 
@@ -506,7 +505,6 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { BuilderFee: (*big.Int)(args.BuilderFee), ValidationGas: uint64(*args.ValidationGas), PaymasterGas: uint64(*args.PaymasterGas), - BigNonce: (*big.Int)(args.BigNonce), } data = &aatx hash := types.NewTx(data).Hash() diff --git a/params/config.go b/params/config.go index eeb666eb77ae..d754edcd6f8f 100644 --- a/params/config.go +++ b/params/config.go @@ -374,7 +374,6 @@ type ChainConfig struct { // RIP-7560 specific config parameters EntryPointAddress common.Address `json:"entryPointAddress,omitempty"` - NonceManagerAddress common.Address `json:"nonceManagerAddress,omitempty"` DeployerCallerAddress common.Address `json:"deployerCallerAddress,omitempty"` } From 28a79ec98deac6791d14407896d997eb44d9ccf1 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 27 May 2024 20:33:09 +0200 Subject: [PATCH 08/73] Fix issue: charging 'from' address (EntryPoint) for inner frames gas --- core/state_transition.go | 16 ++++++++++++++-- core/types/tx_rip7560.go | 14 ++++---------- internal/ethapi/transaction_args.go | 4 ++-- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/core/state_transition.go b/core/state_transition.go index 70b642e74798..9391c2855737 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -68,7 +68,10 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. 
-func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool, isRIP7560InnerFrame ...bool) (uint64, error) { + if isRIP7560InnerFrame != nil && len(isRIP7560InnerFrame) > 0 && isRIP7560InnerFrame[0] { + return 0, nil + } // Set the starting gas for the raw transaction var gas uint64 if isContractCreation && isHomestead { @@ -358,6 +361,15 @@ func (st *StateTransition) preCheck() error { } } } + + // no need to "buy gus" for individual frames + // there is a single shared gas pre-charge + if st.rip7560Frame { + st.gasRemaining += st.msg.GasLimit + st.initialGas = st.msg.GasLimit + return nil + } + return st.buyGas() } @@ -395,7 +407,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsShanghai) + gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsShanghai, msg.IsRip7560Frame) if err != nil { return nil, err } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 335b58683d75..47019c750b49 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -24,11 +24,8 @@ import ( "math/big" ) -const ScaTransactionSubtype = 0x01 - // Rip7560AccountAbstractionTx represents an RIP-7560 transaction. type Rip7560AccountAbstractionTx struct { - Subtype byte // overlapping fields ChainID *big.Int GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas @@ -56,10 +53,9 @@ type Rip7560AccountAbstractionTx struct { // copy creates a deep copy of the transaction data and initializes all fields. 
func (tx *Rip7560AccountAbstractionTx) copy() TxData { cpy := &Rip7560AccountAbstractionTx{ - Subtype: tx.Subtype, - To: copyAddressPtr(tx.To), - Data: common.CopyBytes(tx.Data), - Gas: tx.Gas, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, // These are copied below. AccessList: make(AccessList, len(tx.AccessList)), Value: new(big.Int), @@ -128,14 +124,12 @@ func (tx *Rip7560AccountAbstractionTx) setSignatureValues(chainID, v, r, s *big. // encode the subtype byte and the payload-bearing bytes of the RIP-7560 transaction func (tx *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { - b.WriteByte(ScaTransactionSubtype) return rlp.Encode(b, tx) } // decode the payload-bearing bytes of the encoded RIP-7560 transaction payload func (tx *Rip7560AccountAbstractionTx) decode(input []byte) error { - tx.Subtype = ScaTransactionSubtype - return rlp.DecodeBytes(input[1:], tx) + return rlp.DecodeBytes(input, tx) } // Rip7560Transaction an equivalent of a solidity struct only used to encode the 'transaction' parameter diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 323dcc1a5084..ed7dfcbaf688 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -76,7 +76,6 @@ type TransactionArgs struct { blobSidecarAllowed bool // Introduced by RIP-7560 Transaction - Subtype *hexutil.Uint64 Sender *common.Address `json:"sender"` Signature *hexutil.Bytes PaymasterData *hexutil.Bytes `json:"paymasterData"` @@ -84,6 +83,7 @@ type TransactionArgs struct { BuilderFee *hexutil.Big ValidationGas *hexutil.Uint64 PaymasterGas *hexutil.Uint64 + PostOpGas *hexutil.Uint64 } // from retrieves the transaction sender address. 
@@ -488,7 +488,6 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { al = *args.AccessList } aatx := types.Rip7560AccountAbstractionTx{ - Subtype: byte(*args.Subtype), To: &common.Address{}, ChainID: (*big.Int)(args.ChainID), Gas: uint64(*args.Gas), @@ -505,6 +504,7 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { BuilderFee: (*big.Int)(args.BuilderFee), ValidationGas: uint64(*args.ValidationGas), PaymasterGas: uint64(*args.PaymasterGas), + PostOpGas: uint64(*args.PostOpGas), } data = &aatx hash := types.NewTx(data).Hash() From 19fc05d6c376687d6aad073647417efd0058c7c6 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Wed, 29 May 2024 13:54:37 +0300 Subject: [PATCH 09/73] remove expectedRevenue --- core/txpool/rip7560pool/rip7560pool.go | 11 ++++------- core/types/tx_rip7560.go | 9 ++++----- internal/ethapi/rip7560api.go | 9 ++++----- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/core/txpool/rip7560pool/rip7560pool.go b/core/txpool/rip7560pool/rip7560pool.go index 3c6670d34e4a..19bcad417d3f 100644 --- a/core/txpool/rip7560pool/rip7560pool.go +++ b/core/txpool/rip7560pool/rip7560pool.go @@ -261,13 +261,10 @@ func (pool *Rip7560BundlerPool) GetRip7560BundleStatus(hash common.Hash) (*types return pool.includedBundles[hash], nil } -// Simply returns the bundle with the highest promised revenue by fully trusting the bundler-provided value. 
+// return first bundle func (pool *Rip7560BundlerPool) selectExternalBundle() *types.ExternallyReceivedBundle { - var selectedBundle *types.ExternallyReceivedBundle - for _, bundle := range pool.pendingBundles { - if selectedBundle == nil || selectedBundle.ExpectedRevenue.Cmp(bundle.ExpectedRevenue) == -1 { - selectedBundle = bundle - } + if len(pool.pendingBundles) == 0 { + return nil } - return selectedBundle + return pool.pendingBundles[0] } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 47019c750b49..aee22df7a387 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -188,11 +188,10 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { // ExternallyReceivedBundle represents a bundle of Type 4 transactions received from a trusted 3rd party. // The validator includes the bundle in the original order atomically or drops it completely. type ExternallyReceivedBundle struct { - BundlerId string - BundleHash common.Hash - ExpectedRevenue *big.Int - ValidForBlock *big.Int - Transactions []*Transaction + BundlerId string + BundleHash common.Hash + ValidForBlock *big.Int + Transactions []*Transaction } // BundleReceipt represents a receipt for an ExternallyReceivedBundle successfully included in a block. 
diff --git a/internal/ethapi/rip7560api.go b/internal/ethapi/rip7560api.go index 0245b3207791..4e669cc19c5f 100644 --- a/internal/ethapi/rip7560api.go +++ b/internal/ethapi/rip7560api.go @@ -10,7 +10,7 @@ import ( "math/big" ) -func (s *TransactionAPI) SendRip7560TransactionsBundle(ctx context.Context, args []TransactionArgs, creationBlock *big.Int, expectedRevenue *big.Int, bundlerId string) (common.Hash, error) { +func (s *TransactionAPI) SendRip7560TransactionsBundle(ctx context.Context, args []TransactionArgs, creationBlock *big.Int, bundlerId string) (common.Hash, error) { if len(args) == 0 { return common.Hash{}, errors.New("submitted bundle has zero length") } @@ -19,10 +19,9 @@ func (s *TransactionAPI) SendRip7560TransactionsBundle(ctx context.Context, args txs[i] = args[i].ToTransaction() } bundle := &types.ExternallyReceivedBundle{ - BundlerId: bundlerId, - ExpectedRevenue: expectedRevenue, - ValidForBlock: creationBlock, - Transactions: txs, + BundlerId: bundlerId, + ValidForBlock: creationBlock, + Transactions: txs, } bundleHash := calculateBundleHash(txs) bundle.BundleHash = bundleHash From c29c3eb9bc61e77f2645535faddfce5aa6c3c0e1 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 28 May 2024 18:27:33 +0300 Subject: [PATCH 10/73] remove StateTransition rip7560Frame bool is msg.isRip7560Frame, which allows usage of plain ApplyMessage --- core/state_processor_rip7560.go | 25 +++++-------------------- core/state_transition.go | 3 +-- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 7cbbd8f1dcd9..03207bfffc09 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -113,21 +113,6 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St return nil } -func ApplyRip7560FrameMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) { - return NewRip7560StateTransition(evm, msg, gp).TransitionDb() -} - 
-// NewRip7560StateTransition initialises and returns a new state transition object. -func NewRip7560StateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition { - return &StateTransition{ - gp: gp, - evm: evm, - msg: msg, - state: evm.StateDB, - rip7560Frame: true, - } -} - func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { stubMsg := prepareStubMessage(tx, chainConfig) blockContext := NewEVMBlockContext(header, bc, author) @@ -138,7 +123,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 if deployerMsg != nil { - resultDeployer, err := ApplyRip7560FrameMessage(evm, deployerMsg, gp) + resultDeployer, err := ApplyMessage(evm, deployerMsg, gp) if err != nil { return nil, err } @@ -154,7 +139,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte signer := types.MakeSigner(chainConfig, header.Number, header.Time) signingHash := signer.Hash(tx) accountValidationMsg, err := prepareAccountValidationMessage(tx, chainConfig, signingHash, deploymentUsedGas) - resultAccountValidation, err := ApplyRip7560FrameMessage(evm, accountValidationMsg, gp) + resultAccountValidation, err := ApplyMessage(evm, accountValidationMsg, gp) if err != nil { return nil, err } @@ -199,7 +184,7 @@ func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.Ch return nil, 0, 0, 0, err } if paymasterMsg != nil { - resultPm, err := ApplyRip7560FrameMessage(evm, paymasterMsg, gp) + resultPm, err := ApplyMessage(evm, paymasterMsg, gp) if err != nil { return nil, 0, 0, 0, err } @@ -226,7 +211,7 @@ func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *Exec if err != nil { return nil, err } - paymasterPostOpResult, err 
= ApplyRip7560FrameMessage(evm, paymasterPostOpMsg, gp) + paymasterPostOpResult, err = ApplyMessage(evm, paymasterPostOpMsg, gp) if err != nil { return nil, err } @@ -245,7 +230,7 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase evm := vm.NewEVM(blockContext, txContext, statedb, config, cfg) accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) - executionResult, err := ApplyRip7560FrameMessage(evm, accountExecutionMsg, gp) + executionResult, err := ApplyMessage(evm, accountExecutionMsg, gp) if err != nil { return nil, err } diff --git a/core/state_transition.go b/core/state_transition.go index 9391c2855737..d51833aea3a7 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -217,7 +217,6 @@ type StateTransition struct { initialGas uint64 state vm.StateDB evm *vm.EVM - rip7560Frame bool } // NewStateTransition initialises and returns a new state transition object. @@ -364,7 +363,7 @@ func (st *StateTransition) preCheck() error { // no need to "buy gus" for individual frames // there is a single shared gas pre-charge - if st.rip7560Frame { + if st.msg.IsRip7560Frame { st.gasRemaining += st.msg.GasLimit st.initialGas = st.msg.GasLimit return nil From 117561451bf1af9763b7650dab307e85d28f5105 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Thu, 30 May 2024 13:51:47 +0300 Subject: [PATCH 11/73] add paymaster, deployer optional params (#4) --- core/types/tx_rip7560.go | 10 +++++++++- internal/ethapi/transaction_args.go | 6 +++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index aee22df7a387..96855be3a45d 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -37,7 +37,9 @@ type Rip7560AccountAbstractionTx struct { // extra fields Sender *common.Address Signature []byte + Paymaster *common.Address `rlp:"nil"` PaymasterData []byte + Deployer *common.Address `rlp:"nil"` DeployerData []byte BuilderFee 
*big.Int ValidationGas uint64 @@ -45,7 +47,7 @@ type Rip7560AccountAbstractionTx struct { PostOpGas uint64 // removed fields - To *common.Address + To *common.Address `rlp:"nil"` Nonce uint64 Value *big.Int } @@ -65,11 +67,14 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { Sender: copyAddressPtr(tx.Sender), Signature: common.CopyBytes(tx.Signature), + Paymaster: copyAddressPtr(tx.Paymaster), PaymasterData: common.CopyBytes(tx.PaymasterData), + Deployer: copyAddressPtr(tx.Deployer), DeployerData: common.CopyBytes(tx.DeployerData), BuilderFee: new(big.Int), ValidationGas: tx.ValidationGas, PaymasterGas: tx.PaymasterGas, + PostOpGas: tx.PostOpGas, } copy(cpy.AccessList, tx.AccessList) if tx.Value != nil { @@ -138,11 +143,14 @@ type Rip7560Transaction struct { Nonce *big.Int ValidationGasLimit *big.Int PaymasterGasLimit *big.Int + PostOpGasLimit *big.Int CallGasLimit *big.Int MaxFeePerGas *big.Int MaxPriorityFeePerGas *big.Int BuilderFee *big.Int + Paymaster *common.Address PaymasterData []byte + Deployer *common.Address DeployerData []byte CallData []byte Signature []byte diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index ed7dfcbaf688..ae935bdef96e 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -78,7 +78,9 @@ type TransactionArgs struct { // Introduced by RIP-7560 Transaction Sender *common.Address `json:"sender"` Signature *hexutil.Bytes - PaymasterData *hexutil.Bytes `json:"paymasterData"` + Paymaster *common.Address `json:"paymaster,omitempty"` + PaymasterData *hexutil.Bytes `json:"paymasterData"` + Deployer *common.Address `json:"deployer,omitempty"` DeployerData *hexutil.Bytes BuilderFee *hexutil.Big ValidationGas *hexutil.Uint64 @@ -499,7 +501,9 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { // RIP-7560 parameters Sender: args.Sender, Signature: *args.Signature, + Paymaster: args.Paymaster, PaymasterData: *args.PaymasterData, + Deployer: 
args.Deployer, DeployerData: *args.DeployerData, BuilderFee: (*big.Int)(args.BuilderFee), ValidationGas: uint64(*args.ValidationGas), From 57b805187a242ab19dd5a4e5cad26da53e74082d Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 30 May 2024 14:43:42 +0200 Subject: [PATCH 12/73] Remove unused EOA-era function --- core/state_processor_rip7560.go | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 03207bfffc09..9da06cef696e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -378,27 +378,6 @@ func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.Ch } } -func prepareEOATargetExecutionMessage(baseTx *types.Transaction) (*Message, error) { - tx := baseTx.Rip7560TransactionData() - if len(tx.Data) < 20 { - return nil, errors.New("RIP-7560 sent by an EOA but the transaction data is too short") - } - var to common.Address = [20]byte(tx.Data[0:20]) - return &Message{ - From: *tx.Sender, - To: &to, - Value: tx.Value, - GasLimit: tx.Gas, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: tx.Data[20:], - AccessList: make(types.AccessList, 0), - SkipAccountChecks: true, - IsRip7560Frame: true, - }, nil -} - func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainConfig, executionResult *ExecutionResult) (*Message, error) { if len(vpr.PaymasterContext) == 0 { return nil, nil From 960cd7f64eab2317e596d83bd1bb4b6fa8e0ea50 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 30 May 2024 15:00:06 +0200 Subject: [PATCH 13/73] Fix comment --- core/state_processor_rip7560.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 9da06cef696e..b5a5ae483fee 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -72,7 +72,7 @@ func 
handleRip7560Transactions(transactions []*types.Transaction, index int, sta validatedTransactions = append(validatedTransactions, tx) // This is the line separating the Validation and Execution phases - // It should be separated to implement the mempool-friendly AA RIP (number not assigned yet) + // It should be separated to implement the mempool-friendly AA RIP-7711 // for i, vpr := range validationPhaseResults // TODO: this will miss all validation phase events - pass in 'vpr' From 507f4724a75071cb57332317c42d489e9cc72fd0 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 3 Jun 2024 19:24:21 +0300 Subject: [PATCH 14/73] Test ApplyRip7560ValidationPhases (#5) * initial UT for ApplyRip7560ValidationPhases test ./tests/rip7560/ * refactor test context * add some error tests * returnData helper * github actions for rip7560 tests * update path * refactor test utils * refactor textContextBuilder * remove prestate json. * pr comments --- .github/workflows/go.yml | 4 +- .github/workflows/rip7560test.yml | 34 ++++++++ tests/rip7560/rip7560TestUtils.go | 140 ++++++++++++++++++++++++++++++ tests/rip7560/validation_test.go | 87 +++++++++++++++++++ 4 files changed, 263 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/rip7560test.yml create mode 100644 tests/rip7560/rip7560TestUtils.go create mode 100644 tests/rip7560/validation_test.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 0c673d15f168..93ec8236234e 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,9 +2,9 @@ name: i386 linux tests on: push: - branches: [ master ] + branches: [ disabled ] pull_request: - branches: [ master ] + branches: [ disabled ] workflow_dispatch: jobs: diff --git a/.github/workflows/rip7560test.yml b/.github/workflows/rip7560test.yml new file mode 100644 index 000000000000..8d1b43f4e8d2 --- /dev/null +++ b/.github/workflows/rip7560test.yml @@ -0,0 +1,34 @@ +name: rip7560-test + +on: + pull_request: + branches: + +jobs: + + 
build: + name: Run RIP-7560 tests + runs-on: ubuntu-latest + steps: + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.22.1 + + - name: Cache Go dependencies + uses: actions/cache@v2 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Get dependencies + run: go mod download + + - name: Test + run: go test -v ./tests/rip7560 diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go new file mode 100644 index 000000000000..9a330cbfc3df --- /dev/null +++ b/tests/rip7560/rip7560TestUtils.go @@ -0,0 +1,140 @@ +package rip7560 + +import ( + "bytes" + "context" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/params" + "github.com/status-im/keycard-go/hexutils" + "math/big" + "testing" +) + +const DEFAULT_SENDER = "0x1111111111222222222233333333334444444444" + +type testContext struct { + genesisAlloc types.GenesisAlloc + t *testing.T + chainContext *ethapi.ChainContext + chainConfig *params.ChainConfig + gaspool *core.GasPool + genesis *core.Genesis + genesisBlock *types.Block +} + +func newTestContext(t *testing.T) *testContext { + return newTestContextBuilder(t).build() +} + +type testContextBuilder struct { + t *testing.T + chainConfig *params.ChainConfig + genesisAlloc types.GenesisAlloc +} + +func newTestContextBuilder(t *testing.T) *testContextBuilder { + genesisAlloc := types.GenesisAlloc{} + + chainConfig := params.AllDevChainProtocolChanges + // probably bug in geth.. 
+ chainConfig.PragueTime = chainConfig.CancunTime + + return &testContextBuilder{ + t: t, + chainConfig: chainConfig, + genesisAlloc: genesisAlloc, + } +} + +func (tb *testContextBuilder) build() *testContext { + genesis := &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: tb.genesisAlloc, + } + genesisBlock := genesis.ToBlock() + gaspool := new(core.GasPool).AddGas(genesisBlock.GasLimit()) + + //TODO: fill some mock backend... + var backend ethapi.Backend + + return &testContext{ + t: tb.t, + genesisAlloc: tb.genesisAlloc, + chainContext: ethapi.NewChainContext(context.TODO(), backend), + chainConfig: tb.chainConfig, + genesis: genesis, + genesisBlock: genesisBlock, + gaspool: gaspool, + } +} + +// add EOA account with balance +func (tt *testContextBuilder) withAccount(addr string, balance int64) *testContextBuilder { + tt.genesisAlloc[common.HexToAddress(addr)] = types.Account{Balance: big.NewInt(balance)} + return tt +} + +func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) *testContextBuilder { + if len(code) == 0 { + tt.genesisAlloc[common.HexToAddress(addr)] = types.Account{ + Balance: big.NewInt(balance), + } + } else { + tt.genesisAlloc[common.HexToAddress(addr)] = types.Account{ + Code: code, + Balance: big.NewInt(balance), + } + } + return tt +} + +// generate the code to return the given byte array (up to 32 bytes) +func returnData(data []byte) []byte { + //couldn't get geth to support PUSH0 ... 
+ datalen := len(data) + if datalen == 0 { + data = []byte{0} + } + if datalen > 32 { + panic(fmt.Errorf("data length is too big %v", data)) + } + + PUSHn := byte(int(vm.PUSH0) + datalen) + ret := createCode(PUSHn, data, vm.PUSH1, 0, vm.MSTORE, vm.PUSH1, 32, vm.PUSH1, 0, vm.RETURN) + return ret +} + +// create EVM code from OpCode, byte and []bytes +func createCode(items ...interface{}) []byte { + var buffer bytes.Buffer + + for _, item := range items { + switch v := item.(type) { + case string: + buffer.Write(hexutils.HexToBytes(v)) + case vm.OpCode: + buffer.WriteByte(byte(v)) + case byte: + buffer.WriteByte(v) + case []byte: + buffer.Write(v) + case int8: + buffer.WriteByte(byte(v)) + case int: + if v >= 256 { + panic(fmt.Errorf("int defaults to int8 (byte). int16, etc: %v", v)) + } + buffer.WriteByte(byte(v)) + default: + // should be a compile-time error... + panic(fmt.Errorf("unsupported type: %T", v)) + } + } + + return buffer.Bytes() +} diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go new file mode 100644 index 000000000000..2ab9ca649031 --- /dev/null +++ b/tests/rip7560/validation_test.go @@ -0,0 +1,87 @@ +package rip7560 + +import ( + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/tests" + "github.com/stretchr/testify/assert" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +func TestValidation_OOG(t *testing.T) { + magic := big.NewInt(0xbf45c166) + magic.Lsh(magic, 256-32) + + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 0), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1), + GasFeeCap: big.NewInt(1000000000), + }, "out of gas") +} + +func TestValidation_ok(t *testing.T) { + magic := big.NewInt(0xbf45c166) + magic.Lsh(magic, 256-32) + + 
validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 0), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "") +} + +func TestValidation_account_revert(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + createCode(vm.PUSH1, 0, vm.DUP1, vm.REVERT), 0), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "execution reverted") +} + +func TestValidation_account_no_return_value(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, []byte{ + byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.RETURN), + }, 0), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "invalid account return data length") +} + +func TestValidation_account_wrong_return_value(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + returnData(createCode(1)), + 0), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "account did not return correct MAGIC_VALUE") +} + +func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionTx, expectedErr string) { + t := tb.build() + if aatx.Sender == nil { + //pre-deployed sender account + Sender := common.HexToAddress(DEFAULT_SENDER) + aatx.Sender = &Sender + } + tx := types.NewTx(&aatx) + + var state = tests.MakePreState(rawdb.NewMemoryDatabase(), t.genesisAlloc, false, rawdb.HashScheme) + defer state.Close() + + _, err := core.ApplyRip7560ValidationPhases(t.chainConfig, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) + // err string or empty if nil + errStr := "" + if err != nil { + errStr = err.Error() + } + assert.Equal(t.t, expectedErr, errStr) +} + +//test failure on non-rip7560 + +//IntrinsicGas: for validation frame, 
should return the max possible gas. +// - execution should be "free" (and refund the excess) +// geth increment nonce before "call" our validation frame. (in ApplyMessage) From 5ce41a95f0551b2a1dfaf5a9021b0321212c284d Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 4 Jun 2024 19:38:12 +0300 Subject: [PATCH 15/73] merge master (#6) --- .github/CODEOWNERS | 1 + .github/workflows/go.yml | 4 +- .golangci.yml | 6 +- .travis.yml | 56 +- Makefile | 24 +- accounts/keystore/account_cache_test.go | 9 +- accounts/keystore/keystore.go | 7 +- accounts/scwallet/hub.go | 1 + beacon/engine/types.go | 4 +- beacon/light/sync/head_sync_test.go | 2 +- beacon/types/exec_payload.go | 6 +- build/checksums.txt | 115 +- cmd/clef/main.go | 2 +- cmd/devp2p/discv4cmd.go | 57 + cmd/devp2p/internal/ethtest/snap.go | 5 +- cmd/devp2p/internal/v4test/framework.go | 4 +- cmd/evm/internal/t8ntool/block.go | 2 +- cmd/evm/internal/t8ntool/transition.go | 4 +- cmd/evm/t8n_test.go | 8 +- cmd/geth/consolecmd_test.go | 6 +- cmd/geth/dbcmd.go | 10 +- cmd/geth/logging_test.go | 2 + cmd/geth/snapshot.go | 2 +- cmd/geth/testdata/vcheck/vulnerabilities.json | 32 + cmd/geth/verkle.go | 2 +- cmd/utils/flags.go | 10 +- cmd/utils/history_test.go | 3 +- common/math/big_test.go | 4 +- common/math/integer.go | 4 +- consensus/beacon/consensus.go | 2 +- consensus/clique/clique.go | 2 +- consensus/ethash/consensus.go | 2 +- core/block_validator_test.go | 2 - core/blockchain.go | 29 +- core/blockchain_test.go | 61 +- core/bloombits/scheduler.go | 2 +- core/chain_indexer_test.go | 2 +- core/chain_makers.go | 2 +- core/chain_makers_test.go | 3 +- core/error.go | 5 + core/genesis.go | 4 +- core/genesis_test.go | 6 +- core/mkalloc.go | 1 + core/rawdb/accessors_chain.go | 14 +- core/rawdb/accessors_chain_test.go | 2 +- core/rawdb/accessors_indexes_test.go | 2 +- core/rawdb/accessors_trie.go | 125 +- core/rawdb/ancient_scheme.go | 20 +- core/rawdb/ancient_utils.go | 7 +- core/rawdb/ancienttest/testsuite.go | 325 ++++++ 
core/rawdb/chain_freezer.go | 40 +- core/rawdb/chain_iterator_test.go | 8 +- core/rawdb/database.go | 28 +- core/rawdb/freezer.go | 8 +- core/rawdb/freezer_memory.go | 428 +++++++ core/rawdb/freezer_memory_test.go | 41 + core/rawdb/freezer_resettable.go | 57 +- core/rawdb/freezer_resettable_test.go | 6 +- core/rawdb/freezer_test.go | 20 + core/state/access_events.go | 320 ++++++ core/state/access_events_test.go | 153 +++ core/state/database.go | 24 +- core/state/journal.go | 5 +- core/state/state_object.go | 367 +++--- core/state/statedb.go | 556 ++++----- core/state/statedb_fuzz_test.go | 30 +- core/state/statedb_test.go | 44 + core/state/stateupdate.go | 133 +++ core/state/trie_prefetcher.go | 288 +++-- core/state/trie_prefetcher_test.go | 65 +- core/state_processor.go | 7 + core/state_processor_test.go | 7 +- core/state_transition.go | 18 +- core/tracing/CHANGELOG.md | 12 +- .../gen_balance_change_reason_stringer.go | 37 + core/tracing/hooks.go | 46 +- core/txindexer_test.go | 5 +- core/txpool/legacypool/legacypool_test.go | 2 +- core/types/block.go | 68 +- core/types/block_test.go | 2 +- core/types/transaction.go | 4 +- core/types/transaction_test.go | 2 +- core/vm/common.go | 12 + core/vm/contract.go | 3 + core/vm/contracts.go | 32 +- core/vm/eips.go | 214 ++++ core/vm/evm.go | 60 +- core/vm/gas_table.go | 24 +- core/vm/instructions.go | 13 +- core/vm/instructions_test.go | 2 +- core/vm/interface.go | 5 + core/vm/interpreter.go | 11 + core/vm/jump_table.go | 7 + core/vm/operations_verkle.go | 159 +++ core/vm/runtime/runtime.go | 45 +- core/vm/runtime/runtime_test.go | 6 +- crypto/secp256k1/curve.go | 88 +- crypto/secp256k1/scalar_mult_cgo.go | 2 +- crypto/secp256k1/scalar_mult_nocgo.go | 2 +- eth/backend.go | 6 +- eth/catalyst/api_test.go | 12 +- eth/catalyst/simulated_beacon.go | 3 + eth/downloader/api.go | 2 +- eth/downloader/beaconsync.go | 12 +- eth/downloader/downloader.go | 943 ++------------- eth/downloader/downloader_test.go | 864 ++------------ 
eth/downloader/fetchers.go | 45 - eth/downloader/fetchers_concurrent.go | 34 +- eth/downloader/fetchers_concurrent_bodies.go | 1 - eth/downloader/fetchers_concurrent_headers.go | 97 -- eth/downloader/modes.go | 15 +- eth/downloader/queue.go | 9 + eth/downloader/skeleton_test.go | 389 ++++--- eth/downloader/testchain_test.go | 1 - eth/gasestimator/gasestimator.go | 10 + eth/gasprice/feehistory.go | 5 + eth/handler.go | 5 +- eth/protocols/snap/gentrie.go | 4 +- eth/protocols/snap/progress_test.go | 2 +- eth/protocols/snap/sync.go | 17 +- eth/protocols/snap/sync_test.go | 4 +- eth/tracers/api.go | 12 +- eth/tracers/api_test.go | 90 +- eth/tracers/internal/tracetest/supply_test.go | 613 ++++++++++ .../frontier_create_outofstorage.json | 3 +- eth/tracers/live/gen_supplyinfoburn.go | 49 + eth/tracers/live/gen_supplyinfoissuance.go | 49 + eth/tracers/live/supply.go | 310 +++++ eth/tracers/logger/logger_json.go | 37 +- eth/tracers/native/call.go | 5 + eth/tracers/native/call_flat.go | 18 +- eth/tracers/native/call_flat_test.go | 64 ++ ethclient/ethclient.go | 7 +- ethdb/database.go | 30 +- ethdb/dbtest/testsuite.go | 4 +- ethdb/leveldb/leveldb.go | 2 +- ethdb/memorydb/memorydb.go | 2 +- ethdb/pebble/pebble.go | 32 +- event/multisub.go | 2 +- go.mod | 20 +- go.sum | 47 +- internal/era/era.go | 2 +- internal/era/iterator.go | 2 +- internal/ethapi/api.go | 7 +- internal/ethapi/api_test.go | 10 +- internal/testlog/testlog.go | 2 +- log/logger_test.go | 10 +- metrics/debug.go | 18 +- metrics/sample_test.go | 19 +- miner/miner.go | 2 +- miner/miner_test.go | 2 +- miner/payload_building_test.go | 2 +- node/api.go | 17 + node/node.go | 3 +- p2p/discover/common.go | 54 +- p2p/discover/lookup.go | 47 +- p2p/discover/metrics.go | 14 +- p2p/discover/node.go | 86 +- p2p/discover/table.go | 800 ++++++------- p2p/discover/table_reval.go | 244 ++++ p2p/discover/table_reval_test.go | 119 ++ p2p/discover/table_test.go | 315 ++--- p2p/discover/table_util_test.go | 146 ++- 
p2p/discover/v4_lookup_test.go | 27 +- p2p/discover/v4_udp.go | 156 +-- p2p/discover/v4_udp_test.go | 107 +- p2p/discover/v4wire/v4wire.go | 16 +- p2p/discover/v5_talk.go | 6 +- p2p/discover/v5_udp.go | 82 +- p2p/discover/v5_udp_test.go | 93 +- p2p/discover/v5wire/encoding_test.go | 36 +- p2p/enode/idscheme.go | 2 +- p2p/enode/node.go | 137 ++- p2p/enode/node_test.go | 162 +++ p2p/enode/nodedb.go | 12 +- p2p/enode/urlv4.go | 2 +- p2p/enr/entries.go | 55 + p2p/nodestate/nodestate.go | 1023 ----------------- p2p/nodestate/nodestate_test.go | 407 ------- p2p/server.go | 43 +- p2p/simulations/adapters/inproc.go | 12 +- p2p/simulations/adapters/types.go | 1 - p2p/simulations/examples/ping-pong.go | 2 +- params/config.go | 29 +- params/config_test.go | 18 + params/protocol_params.go | 5 + params/verkle_params.go | 36 + params/version.go | 2 +- tests/init.go | 21 +- tests/state_test.go | 8 - tests/testdata | 2 +- trie/hasher.go | 3 +- trie/secure_trie.go | 4 + trie/stacktrie_fuzzer_test.go | 5 +- trie/sync.go | 74 +- trie/trie_test.go | 4 +- trie/trienode/node.go | 17 +- trie/trienode/node_test.go | 61 + trie/triestate/state.go | 3 +- trie/utils/verkle.go | 8 +- trie/utils/verkle_test.go | 2 +- trie/verkle.go | 2 +- triedb/database.go | 9 +- triedb/database/database.go | 3 + triedb/hashdb/database.go | 5 - triedb/pathdb/database.go | 122 +- triedb/pathdb/database_test.go | 4 +- triedb/pathdb/difflayer_test.go | 8 +- triedb/pathdb/disklayer.go | 27 +- triedb/pathdb/history.go | 40 +- triedb/pathdb/history_inspect.go | 12 +- triedb/pathdb/history_test.go | 17 +- triedb/pathdb/journal.go | 18 +- triedb/pathdb/nodebuffer.go | 11 +- 214 files changed, 7392 insertions(+), 5892 deletions(-) create mode 100644 core/rawdb/ancienttest/testsuite.go create mode 100644 core/rawdb/freezer_memory.go create mode 100644 core/rawdb/freezer_memory_test.go create mode 100644 core/state/access_events.go create mode 100644 core/state/access_events_test.go create mode 100644 
core/state/stateupdate.go create mode 100644 core/tracing/gen_balance_change_reason_stringer.go create mode 100644 core/vm/operations_verkle.go delete mode 100644 eth/downloader/fetchers_concurrent_headers.go create mode 100644 eth/tracers/internal/tracetest/supply_test.go create mode 100644 eth/tracers/live/gen_supplyinfoburn.go create mode 100644 eth/tracers/live/gen_supplyinfoissuance.go create mode 100644 eth/tracers/live/supply.go create mode 100644 eth/tracers/native/call_flat_test.go create mode 100644 p2p/discover/table_reval.go create mode 100644 p2p/discover/table_reval_test.go delete mode 100644 p2p/nodestate/nodestate.go delete mode 100644 p2p/nodestate/nodestate_test.go create mode 100644 params/verkle_params.go create mode 100644 trie/trienode/node_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index faf922df0161..0dabaf4df5cf 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -10,6 +10,7 @@ core/ @karalabe @holiman @rjl493456442 eth/ @karalabe @holiman @rjl493456442 eth/catalyst/ @gballet eth/tracers/ @s1na +core/tracing/ @s1na graphql/ @s1na les/ @zsfelfoldi @rjl493456442 light/ @zsfelfoldi @rjl493456442 diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 93ec8236234e..6162a3b7dd5b 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -11,9 +11,9 @@ jobs: build: runs-on: self-hosted steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: go-version: 1.21.4 - name: Run tests diff --git a/.golangci.yml b/.golangci.yml index 0343c4b4ebf2..46844d1e90b9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,8 +6,6 @@ run: # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true - skip-files: - - core/genesis_alloc.go linters: disable-all: true @@ -26,6 +24,8 @@ linters: - exportloopref - whitespace + ### linters we tried and will not be using: + ### # - structcheck # lots of false positives # - errcheck #lot of false positives # - contextcheck @@ -40,6 +40,8 @@ linters-settings: simplify: true issues: + exclude-files: + - core/genesis_alloc.go exclude-rules: - path: crypto/bn256/cloudflare/optate.go linters: diff --git a/.travis.yml b/.travis.yml index 8c0af291a3df..2dc80f85edf9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,7 +15,7 @@ jobs: if: type = push os: linux arch: amd64 - dist: bionic + dist: noble go: 1.22.x env: - docker @@ -32,7 +32,7 @@ jobs: if: type = push os: linux arch: arm64 - dist: bionic + dist: noble go: 1.22.x env: - docker @@ -49,21 +49,20 @@ jobs: - stage: build if: type = push os: linux - dist: bionic + dist: noble sudo: required go: 1.22.x env: - azure-linux git: submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - gcc-multilib script: - # Build for the primary platforms that Trusty can manage + # build amd64 - go run build/ci.go install -dlgo - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + + # build 386 + - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib - go run build/ci.go install -dlgo -arch 386 - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds @@ -98,48 +97,34 @@ jobs: # These builders run the tests - stage: build + if: type = push os: linux arch: amd64 - dist: bionic + dist: noble go: 1.22.x script: - - travis_wait 30 go run build/ci.go test $TEST_PACKAGES - - - stage: build - if: type = pull_request - os: linux - arch: arm64 - dist: bionic - go: 1.21.x - script: - - travis_wait 
30 go run build/ci.go test $TEST_PACKAGES + - travis_wait 45 go run build/ci.go test $TEST_PACKAGES - stage: build + if: type = push os: linux - dist: bionic + dist: noble go: 1.21.x script: - - travis_wait 30 go run build/ci.go test $TEST_PACKAGES + - travis_wait 45 go run build/ci.go test $TEST_PACKAGES # This builder does the Ubuntu PPA nightly uploads - stage: build if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux - dist: bionic + dist: noble go: 1.22.x env: - ubuntu-ppa git: submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - devscripts - - debhelper - - dput - - fakeroot - - python-bzrlib - - python-paramiko + before_install: + - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " @@ -148,7 +133,7 @@ jobs: - stage: build if: type = cron os: linux - dist: bionic + dist: noble go: 1.22.x env: - azure-purge @@ -161,8 +146,9 @@ jobs: - stage: build if: type = cron os: linux - dist: bionic + dist: noble go: 1.22.x + env: + - racetests script: - - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES - + - travis_wait 60 go run build/ci.go test -race $TEST_PACKAGES diff --git a/Makefile b/Makefile index 278ae63120f6..857cb8c97899 100644 --- a/Makefile +++ b/Makefile @@ -2,31 +2,35 @@ # with Go source code. 
If you know what GOPATH is then you probably # don't need to bother with make. -.PHONY: geth all test lint clean devtools help +.PHONY: geth all test lint fmt clean devtools help GOBIN = ./build/bin GO ?= latest GORUN = go run -#? geth: Build geth +#? geth: Build geth. geth: $(GORUN) build/ci.go install ./cmd/geth @echo "Done building." @echo "Run \"$(GOBIN)/geth\" to launch geth." -#? all: Build all packages and executables +#? all: Build all packages and executables. all: $(GORUN) build/ci.go install -#? test: Run the tests +#? test: Run the tests. test: all $(GORUN) build/ci.go test -#? lint: Run certain pre-selected linters +#? lint: Run certain pre-selected linters. lint: ## Run linters. $(GORUN) build/ci.go lint -#? clean: Clean go cache, built executables, and the auto generated folder +#? fmt: Ensure consistent code formatting. +fmt: + gofmt -s -w $(shell find . -name "*.go") + +#? clean: Clean go cache, built executables, and the auto generated folder. clean: go clean -cache rm -fr build/_workspace/pkg/ $(GOBIN)/* @@ -34,7 +38,7 @@ clean: # The devtools target installs tools required for 'go generate'. # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. -#? devtools: Install recommended developer tools +#? devtools: Install recommended developer tools. devtools: env GOBIN= go install golang.org/x/tools/cmd/stringer@latest env GOBIN= go install github.com/fjl/gencodec@latest @@ -45,5 +49,9 @@ devtools: #? help: Get more info on make commands. 
help: Makefile - @echo " Choose a command run in go-ethereum:" + @echo '' + @echo 'Usage:' + @echo ' make [target]' + @echo '' + @echo 'Targets:' @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /' diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index 1a9f9a4714cc..6bc14f5bb6d1 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -326,6 +326,11 @@ func TestUpdatedKeyfileContents(t *testing.T) { // Create a temporary keystore to test with dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) + + // Create the directory + os.MkdirAll(dir, 0700) + defer os.RemoveAll(dir) + ks := NewKeyStore(dir, LightScryptN, LightScryptP) list := ks.Accounts() @@ -335,9 +340,7 @@ func TestUpdatedKeyfileContents(t *testing.T) { if !waitWatcherStart(ks) { t.Fatal("keystore watcher didn't start in time") } - // Create the directory and copy a key file into it. - os.MkdirAll(dir, 0700) - defer os.RemoveAll(dir) + // Copy a key file into it file := filepath.Join(dir, "aaa") // Place one of our testfiles in there diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 5c978cf0b422..df3dda60b656 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -312,11 +312,10 @@ func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error { // Lock removes the private key with the given address from memory. 
func (ks *KeyStore) Lock(addr common.Address) error { ks.mu.Lock() - if unl, found := ks.unlocked[addr]; found { - ks.mu.Unlock() + unl, found := ks.unlocked[addr] + ks.mu.Unlock() + if found { ks.expire(addr, unl, time.Duration(0)*time.Nanosecond) - } else { - ks.mu.Unlock() } return nil } diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index 5f1f369ca2a0..1b1899dc8e8e 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -95,6 +95,7 @@ func (hub *Hub) readPairings() error { } return err } + defer pairingFile.Close() pairingData, err := io.ReadAll(pairingFile) if err != nil { diff --git a/beacon/engine/types.go b/beacon/engine/types.go index fc77c13af707..1dfcf5b71a0e 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -209,7 +209,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) { return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas) } - var blobHashes []common.Hash + var blobHashes = make([]common.Hash, 0, len(txs)) for _, tx := range txs { blobHashes = append(blobHashes, tx.BlobHashes()...) 
} @@ -250,7 +250,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, BlobGasUsed: params.BlobGasUsed, ParentBeaconRoot: beaconRoot, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: params.Withdrawals}) if block.Hash() != params.BlockHash { return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash()) } diff --git a/beacon/light/sync/head_sync_test.go b/beacon/light/sync/head_sync_test.go index cd7dacf7fe7d..d095d6a4466e 100644 --- a/beacon/light/sync/head_sync_test.go +++ b/beacon/light/sync/head_sync_test.go @@ -91,7 +91,7 @@ func TestValidatedHead(t *testing.T) { ts.ServerEvent(EvNewOptimisticUpdate, testServer3, testOptUpdate4) // finality should be requested from both servers ts.Run(4, testServer1, ReqFinality{}, testServer3, ReqFinality{}) - // future period annonced heads should be queued + // future period announced heads should be queued ht.ExpValidated(t, 4, nil) chain.SetNextSyncPeriod(2) diff --git a/beacon/types/exec_payload.go b/beacon/types/exec_payload.go index 718f98f5292e..b159687dfcc5 100644 --- a/beacon/types/exec_payload.go +++ b/beacon/types/exec_payload.go @@ -63,11 +63,9 @@ func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*typ panic("unsupported block type") } - block := types.NewBlockWithHeader(&header) - block = block.WithBody(transactions, nil) - block = block.WithWithdrawals(withdrawals) + block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: transactions, Withdrawals: withdrawals}) if hash := block.Hash(); hash != expectedHash { - return nil, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash) + return nil, fmt.Errorf("sanity check failed, payload hash does not match (expected %x, got %x)", 
expectedHash, hash) } return block, nil } diff --git a/build/checksums.txt b/build/checksums.txt index 767fc88ce59f..94cac9738c1e 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -5,53 +5,80 @@ # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz -# version:golang 1.22.2 +# version:golang 1.22.3 # https://go.dev/dl/ -374ea82b289ec738e968267cac59c7d5ff180f9492250254784b2044e90df5a9 go1.22.2.src.tar.gz -33e7f63077b1c5bce4f1ecadd4d990cf229667c40bfb00686990c950911b7ab7 go1.22.2.darwin-amd64.tar.gz -660298be38648723e783ba0398e90431de1cb288c637880cdb124f39bd977f0d go1.22.2.darwin-arm64.tar.gz -efc7162b0cad2f918ac566a923d4701feb29dc9c0ab625157d49b1cbcbba39da go1.22.2.freebsd-386.tar.gz -d753428296e6709527e291fd204700a587ffef2c0a472b21aebea11618245929 go1.22.2.freebsd-amd64.tar.gz -586d9eb7fe0489ab297ad80dd06414997df487c5cf536c490ffeaa8d8f1807a7 go1.22.2.linux-386.tar.gz -5901c52b7a78002aeff14a21f93e0f064f74ce1360fce51c6ee68cd471216a17 go1.22.2.linux-amd64.tar.gz -36e720b2d564980c162a48c7e97da2e407dfcc4239e1e58d98082dfa2486a0c1 go1.22.2.linux-arm64.tar.gz -9243dfafde06e1efe24d59df6701818e6786b4adfdf1191098050d6d023c5369 go1.22.2.linux-armv6l.tar.gz -251a8886c5113be6490bdbb955ddee98763b49c9b1bf4c8364c02d3b482dab00 go1.22.2.linux-ppc64le.tar.gz -2b39019481c28c560d65e9811a478ae10e3ef765e0f59af362031d386a71bfef go1.22.2.linux-s390x.tar.gz -651753c06df037020ef4d162c5b273452e9ba976ed17ae39e66ef7ee89d8147e go1.22.2.windows-386.zip -8e581cf330f49d3266e936521a2d8263679ef7e2fc2cbbceb85659122d883596 go1.22.2.windows-amd64.zip -ddfca5beb9a0c62254266c3090c2555d899bf3e7aa26243e7de3621108f06875 go1.22.2.windows-arm64.zip +80648ef34f903193d72a59c0dff019f5f98ae0c9aa13ade0b0ecbff991a76f68 go1.22.3.src.tar.gz +adc9f5fee89cd53d907eb542d3b269d9d8a08a66bf1ab42175450ffbb58733fb go1.22.3.aix-ppc64.tar.gz +610e48c1df4d2f852de8bc2e7fd2dc1521aac216f0c0026625db12f67f192024 
go1.22.3.darwin-amd64.tar.gz +02abeab3f4b8981232237ebd88f0a9bad933bc9621791cd7720a9ca29eacbe9d go1.22.3.darwin-arm64.tar.gz +a5b3d54905f17af2ceaf7fcfe92edee67a5bd4eccd962dd89df719ace3e0894d go1.22.3.dragonfly-amd64.tar.gz +b9989ca87695ae93bacde6f3aa7b13cde5f3825515eb9ed9bbef014273739889 go1.22.3.freebsd-386.tar.gz +7483961fae29d7d768afd5c9c0f229354ca3263ab7119c20bc182761f87cbc74 go1.22.3.freebsd-amd64.tar.gz +edf1f0b8ecf68b14faeedb4f5d868a58c4777a0282bd85e5115c39c010cd0130 go1.22.3.freebsd-arm.tar.gz +572eb70e5e835fbff7d53ebf473f611d7eb458c428f8dbd98a49196883c3309e go1.22.3.freebsd-arm64.tar.gz +ef94eb2b74402e436dce970584222c4e454eb3093908591149bd2ded6862b8af go1.22.3.freebsd-riscv64.tar.gz +3c3f498c68334cbd11f72aadfb6bcb507eb8436cebc50f437a0523cd4c5e03d1 go1.22.3.illumos-amd64.tar.gz +fefba30bb0d3dd1909823ee38c9f1930c3dc5337a2ac4701c2277a329a386b57 go1.22.3.linux-386.tar.gz +8920ea521bad8f6b7bc377b4824982e011c19af27df88a815e3586ea895f1b36 go1.22.3.linux-amd64.tar.gz +6c33e52a5b26e7aa021b94475587fce80043a727a54ceb0eee2f9fc160646434 go1.22.3.linux-arm64.tar.gz +f2bacad20cd2b96f23a86d4826525d42b229fd431cc6d0dec61ff3bc448ef46e go1.22.3.linux-armv6l.tar.gz +41e9328340544893482b2928ae18a9a88ba18b2fdd29ac77f4d33cf1815bbdc2 go1.22.3.linux-loong64.tar.gz +cf4d5faff52e642492729eaf396968f43af179518be769075b90bc1bf650abf6 go1.22.3.linux-mips.tar.gz +3bd009fe2e3d2bfd52433a11cb210d1dfa50b11b4c347a293951efd9e36de945 go1.22.3.linux-mips64.tar.gz +5913b82a042188ef698f7f2dfd0cd0c71f0508a4739de9e41fceff3f4dc769b4 go1.22.3.linux-mips64le.tar.gz +441afebca555be5313867b4577f237c7b5c0fff4386e22e47875b9f805abbec5 go1.22.3.linux-mipsle.tar.gz +f3b53190a76f4a35283501ba6d94cbb72093be0c62ff735c6f9e586a1c983381 go1.22.3.linux-ppc64.tar.gz +04b7b05283de30dd2da20bf3114b2e22cc727938aed3148babaf35cc951051ac go1.22.3.linux-ppc64le.tar.gz +d4992d4a85696e3f1de06cefbfc2fd840c9c6695d77a0f35cfdc4e28b2121c20 go1.22.3.linux-riscv64.tar.gz +2aba796417a69be5f3ed489076bac79c1c02b36e29422712f9f3bf51da9cf2d4 
go1.22.3.linux-s390x.tar.gz +d6e6113542dd9f23db899e177fe23772bac114a5ea5e8ee436b9da68628335a8 go1.22.3.netbsd-386.tar.gz +c33cee3075bd18ceefddd75bafa8efb51fbdc17b5ee74275122e7a927a237a4c go1.22.3.netbsd-amd64.tar.gz +1ab251df3c85f3b391a09565ca52fb6e1306527d72852d553e9ab74eabb4ecf8 go1.22.3.netbsd-arm.tar.gz +1d194fe53f5d82f9a612f848950d8af8cab7cb40ccc03f10c4eb1c9808ff1a0c go1.22.3.netbsd-arm64.tar.gz +91d6601727f08506e938640885d3ded784925045e3a4444fd9b4b936efe1b1e0 go1.22.3.openbsd-386.tar.gz +09d0c91ae35a4eea92615426992062ca236cc2f66444fb0b0a24cd3b13bd5297 go1.22.3.openbsd-amd64.tar.gz +338da30cc2c97b9458e0b4caa2509f67bba55d3de16fb7d31775baca82d2e3dc go1.22.3.openbsd-arm.tar.gz +53eadfabd2b7dd09a64941421afee2a2888e2a4f94f353b27919b1dad1171a21 go1.22.3.openbsd-arm64.tar.gz +8a1a2842ae8dcf2374bb05dff58074b368bb698dc9c211c794c1ff119cd9fdc7 go1.22.3.plan9-386.tar.gz +f9816d3dd9e730cad55085ea08c1f0c925720728f9c945fff59cd24d2ac2db7b go1.22.3.plan9-amd64.tar.gz +f4d3d7b17c9e1b1635fcb287b5b5ab5b60acc9db3ba6a27f2b2f5d6537a2ef95 go1.22.3.plan9-arm.tar.gz +46b7999ee94d91b21ad6940b5a3131ff6fe53ef97be9a34e582e2a3ad7263e95 go1.22.3.solaris-amd64.tar.gz +f60f63b8a0885e0d924f39fd284aee5438fe87d8c3d8545a312adf43e0d9edac go1.22.3.windows-386.zip +cab2af6951a6e2115824263f6df13ff069c47270f5788714fa1d776f7f60cb39 go1.22.3.windows-amd64.zip +40b37f4b068fc759f3a0dd61176a0f7570a4ba48bed8561c31d3967a3583981a go1.22.3.windows-arm.zip +59b76ee22b9b1c3afbf7f50e3cb4edb954d6c0d25e5e029ab5483a6804d61e71 go1.22.3.windows-arm64.zip -# version:golangci 1.55.2 +# version:golangci 1.59.0 # https://github.com/golangci/golangci-lint/releases/ -# https://github.com/golangci/golangci-lint/releases/download/v1.55.2/ -632e96e6d5294fbbe7b2c410a49c8fa01c60712a0af85a567de85bcc1623ea21 golangci-lint-1.55.2-darwin-amd64.tar.gz -234463f059249f82045824afdcdd5db5682d0593052f58f6a3039a0a1c3899f6 golangci-lint-1.55.2-darwin-arm64.tar.gz -2bdd105e2d4e003a9058c33a22bb191a1e0f30fa0790acca0d8fbffac1d6247c 
golangci-lint-1.55.2-freebsd-386.tar.gz -e75056e8b082386676ce23eba455cf893931a792c0d87e1e3743c0aec33c7fb5 golangci-lint-1.55.2-freebsd-amd64.tar.gz -5789b933facaf6136bd23f1d50add67b79bbcf8dfdfc9069a37f729395940a66 golangci-lint-1.55.2-freebsd-armv6.tar.gz -7f21ab1008d05f32c954f99470fc86a83a059e530fe2add1d0b7d8ed4d8992a7 golangci-lint-1.55.2-freebsd-armv7.tar.gz -33ab06139b9219a28251f10821da94423db30285cc2af97494cbb2a281927de9 golangci-lint-1.55.2-illumos-amd64.tar.gz -57ce6f8ce3ad6ee45d7cc3d9a047545a851c2547637834a3fcb086c7b40b1e6b golangci-lint-1.55.2-linux-386.tar.gz -ca21c961a33be3bc15e4292dc40c98c8dcc5463a7b6768a3afc123761630c09c golangci-lint-1.55.2-linux-amd64.tar.gz -8eb0cee9b1dbf0eaa49871798c7f8a5b35f2960c52d776a5f31eb7d886b92746 golangci-lint-1.55.2-linux-arm64.tar.gz -3195f3e0f37d353fd5bd415cabcd4e263f5c29d3d0ffb176c26ff3d2c75eb3bb golangci-lint-1.55.2-linux-armv6.tar.gz -c823ee36eb1a719e171de1f2f5ca3068033dce8d9817232fd10ed71fd6650406 golangci-lint-1.55.2-linux-armv7.tar.gz -758a5d2a356dc494bd13ed4c0d4bf5a54a4dc91267ea5ecdd87b86c7ca0624e7 golangci-lint-1.55.2-linux-loong64.tar.gz -2c7b9abdce7cae802a67d583cd7c6dca520bff6d0e17c8535a918e2f2b437aa0 golangci-lint-1.55.2-linux-mips64.tar.gz -024e0a15b85352cc27271285526e16a4ab66d3e67afbbe446c9808c06cb8dbed golangci-lint-1.55.2-linux-mips64le.tar.gz -6b00f89ba5506c1de1efdd9fa17c54093013a294fefd8b9b31534db626a672ee golangci-lint-1.55.2-linux-ppc64le.tar.gz -0faa0d047d9bf7b703ed3ea65b6117043c93504f9ca1de25ae929d3901c73d4a golangci-lint-1.55.2-linux-riscv64.tar.gz -30dec9b22e7d5bb4e9d5ccea96da20f71cd7db3c8cf30b8ddc7cb9174c4d742a golangci-lint-1.55.2-linux-s390x.tar.gz -5a0ede48f79ad707902fdb29be8cd2abd8302dc122b65ebae3fdfc86751c7698 golangci-lint-1.55.2-netbsd-386.tar.gz -95af20a2e617126dd5b08122ece7819101070e1582a961067ce8c41172f901ad golangci-lint-1.55.2-netbsd-amd64.tar.gz -94fb7dacb7527847cc95d7120904e19a2a0a81a0d50d61766c9e0251da72ab9d golangci-lint-1.55.2-netbsd-armv6.tar.gz 
-ca906bce5fee9619400e4a321c56476fe4a4efb6ac4fc989d340eb5563348873 golangci-lint-1.55.2-netbsd-armv7.tar.gz -45b442f69fc8915c4500201c0247b7f3f69544dbc9165403a61f9095f2c57355 golangci-lint-1.55.2-windows-386.zip -f57d434d231d43417dfa631587522f8c1991220b43c8ffadb9c7bd279508bf81 golangci-lint-1.55.2-windows-amd64.zip -fd7dc8f4c6829ee6fafb252a4d81d2155cd35da7833665cbb25d53ce7cecd990 golangci-lint-1.55.2-windows-arm64.zip -1892c3c24f9e7ef44b02f6750c703864b6dc350129f3ec39510300007b2376f1 golangci-lint-1.55.2-windows-armv6.zip -a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint-1.55.2-windows-armv7.zip +# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/ +418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz +5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz +8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz +658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz +4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz +ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz +439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz +940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz +3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz +c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz +93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz +d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz 
+047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz +5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz +71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz +6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz +af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz +a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz +68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz +d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz +83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz +6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz +11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz +466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip +3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip +b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip +6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip +3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip # This is the builder on PPA that will build Go itself (inception-y), don't modify! 
# diff --git a/cmd/clef/main.go b/cmd/clef/main.go index f9b00e4a12a0..88d4c99e785a 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -552,7 +552,7 @@ func listWallets(c *cli.Context) error { // accountImport imports a raw hexadecimal private key via CLI. func accountImport(c *cli.Context) error { if c.Args().Len() != 1 { - return errors.New(" must be given as first argument.") + return errors.New(" must be given as first argument") } internalApi, ui, err := initInternalApi(c) if err != nil { diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 45bcdcd3674b..3b5400ca3a83 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "net" + "net/http" "strconv" "strings" "time" @@ -28,9 +29,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/internal/flags" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/urfave/cli/v2" ) @@ -45,6 +48,7 @@ var ( discv4ResolveJSONCommand, discv4CrawlCommand, discv4TestCommand, + discv4ListenCommand, }, } discv4PingCommand = &cli.Command{ @@ -75,6 +79,14 @@ var ( Flags: discoveryNodeFlags, ArgsUsage: "", } + discv4ListenCommand = &cli.Command{ + Name: "listen", + Usage: "Runs a discovery node", + Action: discv4Listen, + Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{ + httpAddrFlag, + }), + } discv4CrawlCommand = &cli.Command{ Name: "crawl", Usage: "Updates a nodes.json file with random nodes found in the DHT", @@ -131,6 +143,10 @@ var ( Usage: "Enode of the remote node under test", EnvVars: []string{"REMOTE_ENODE"}, } + httpAddrFlag = &cli.StringFlag{ + Name: "rpc", + Usage: "HTTP server listening address", + } ) var discoveryNodeFlags = []cli.Flag{ @@ -154,6 +170,27 @@ func discv4Ping(ctx *cli.Context) error { 
return nil } +func discv4Listen(ctx *cli.Context) error { + disc, _ := startV4(ctx) + defer disc.Close() + + fmt.Println(disc.Self()) + + httpAddr := ctx.String(httpAddrFlag.Name) + if httpAddr == "" { + // Non-HTTP mode. + select {} + } + + api := &discv4API{disc} + log.Info("Starting RPC API server", "addr", httpAddr) + srv := rpc.NewServer() + srv.RegisterName("discv4", api) + http.DefaultServeMux.Handle("/", srv) + httpsrv := http.Server{Addr: httpAddr, Handler: http.DefaultServeMux} + return httpsrv.ListenAndServe() +} + func discv4RequestRecord(ctx *cli.Context) error { n := getNodeArg(ctx) disc, _ := startV4(ctx) @@ -362,3 +399,23 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { } return nodes, nil } + +type discv4API struct { + host *discover.UDPv4 +} + +func (api *discv4API) LookupRandom(n int) (ns []*enode.Node) { + it := api.host.RandomNodes() + for len(ns) < n && it.Next() { + ns = append(ns, it.Node()) + } + return ns +} + +func (api *discv4API) Buckets() [][]discover.BucketNode { + return api.host.TableBuckets() +} + +func (api *discv4API) Self() *enode.Node { + return api.host.Self() +} diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go index 8ff3f1f71a6e..4f1b6f86562a 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) func (c *Conn) snapRequest(code uint64, msg any) (any, error) { @@ -905,7 +904,7 @@ func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error { // that the serving node is missing var ( bytecodes = res.Codes - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) codes = make([][]byte, len(req.Hashes)) ) @@ -964,7 +963,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc 
*trieNodesTest) error { // Cross reference the requested trienodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) trienodes := res.Nodes if got, want := len(trienodes), len(tc.expHashes); got != want { diff --git a/cmd/devp2p/internal/v4test/framework.go b/cmd/devp2p/internal/v4test/framework.go index 92865941810b..df1f1f8abaca 100644 --- a/cmd/devp2p/internal/v4test/framework.go +++ b/cmd/devp2p/internal/v4test/framework.go @@ -62,7 +62,7 @@ func newTestEnv(remote string, listen1, listen2 string) *testenv { if tcpPort = node.TCP(); tcpPort == 0 { tcpPort = 30303 } - if udpPort = node.TCP(); udpPort == 0 { + if udpPort = node.UDP(); udpPort == 0 { udpPort = 30303 } node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort) @@ -110,7 +110,7 @@ func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint { } func (te *testenv) remoteEndpoint() v4wire.Endpoint { - return v4wire.NewEndpoint(te.remoteAddr, 0) + return v4wire.NewEndpoint(te.remoteAddr.AddrPort(), 0) } func contains(ns []v4wire.Node, key v4wire.Pubkey) bool { diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 62c8593a1d47..37a6db9ffcde 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -160,7 +160,7 @@ func (i *bbInput) ToBlock() *types.Block { if i.Header.Difficulty != nil { header.Difficulty = i.Header.Difficulty } - return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) + return types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: i.Txs, Uncles: i.Ommers, Withdrawals: i.Withdrawals}) } // SealBlock seals the given block using the configured engine. 
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 2b5eaa65aae1..8fd068b2ea14 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -217,7 +217,7 @@ func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error { return nil } if env.ParentBaseFee == nil || env.Number == 0 { - return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'parentBaseFee' in env section")) } env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{ Number: new(big.Int).SetUint64(env.Number - 1), @@ -296,7 +296,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) { balance, _ := new(big.Int).SetString(dumpAccount.Balance, 0) var storage map[common.Hash]common.Hash if dumpAccount.Storage != nil { - storage = make(map[common.Hash]common.Hash) + storage = make(map[common.Hash]common.Hash, len(dumpAccount.Storage)) for k, v := range dumpAccount.Storage { storage[k] = common.HexToHash(v) } diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 5a74491c3b2d..76ebc420ec6c 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -234,7 +234,7 @@ func TestT8n(t *testing.T) { { // Test post-merge transition base: "./testdata/24", input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Merge", "", + "alloc.json", "txs.json", "env.json", "Paris", "", }, output: t8nOutput{alloc: true, result: true}, expOut: "exp.json", @@ -242,7 +242,7 @@ func TestT8n(t *testing.T) { { // Test post-merge transition where input is missing random base: "./testdata/24", input: t8nInput{ - "alloc.json", "txs.json", "env-missingrandom.json", "Merge", "", + "alloc.json", "txs.json", "env-missingrandom.json", "Paris", "", }, output: t8nOutput{alloc: false, result: false}, expExitCode: 3, @@ -250,7 +250,7 @@ func TestT8n(t *testing.T) { { // Test base fee 
calculation base: "./testdata/25", input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Merge", "", + "alloc.json", "txs.json", "env.json", "Paris", "", }, output: t8nOutput{alloc: true, result: true}, expOut: "exp.json", @@ -378,7 +378,7 @@ func TestT8nTracing(t *testing.T) { { base: "./testdata/32", input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Merge", "", + "alloc.json", "txs.json", "env.json", "Paris", "", }, extraArgs: []string{"--trace", "--trace.callframes"}, expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"}, diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 4d6220641703..33d6d4bbc421 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -103,17 +103,17 @@ func TestAttachWelcome(t *testing.T) { "--http", "--http.port", httpPort, "--ws", "--ws.port", wsPort) t.Run("ipc", func(t *testing.T) { - waitForEndpoint(t, ipc, 3*time.Second) + waitForEndpoint(t, ipc, 4*time.Second) testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) }) t.Run("http", func(t *testing.T) { endpoint := "http://127.0.0.1:" + httpPort - waitForEndpoint(t, endpoint, 3*time.Second) + waitForEndpoint(t, endpoint, 4*time.Second) testAttachWelcome(t, geth, endpoint, httpAPIs) }) t.Run("ws", func(t *testing.T) { endpoint := "ws://127.0.0.1:" + wsPort - waitForEndpoint(t, endpoint, 3*time.Second) + waitForEndpoint(t, endpoint, 4*time.Second) testAttachWelcome(t, geth, endpoint, httpAPIs) }) geth.Kill() diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 4e91a4ff25ed..742eadd5f368 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -246,11 +246,17 @@ func removeDB(ctx *cli.Context) error { ancientDir = config.Node.ResolvePath(ancientDir) } // Delete state data - statePaths := []string{rootDir, filepath.Join(ancientDir, rawdb.StateFreezerName)} + statePaths := []string{ + rootDir, + filepath.Join(ancientDir, rawdb.StateFreezerName), + } confirmAndRemoveDB(statePaths, 
"state data", ctx, removeStateDataFlag.Name) // Delete ancient chain - chainPaths := []string{filepath.Join(ancientDir, rawdb.ChainFreezerName)} + chainPaths := []string{filepath.Join( + ancientDir, + rawdb.ChainFreezerName, + )} confirmAndRemoveDB(chainPaths, "ancient chain", ctx, removeChainDataFlag.Name) return nil } diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go index b5ce03f4b8db..f426b138bb67 100644 --- a/cmd/geth/logging_test.go +++ b/cmd/geth/logging_test.go @@ -73,6 +73,7 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) { if err != nil { t.Fatal(err) } + defer readFile.Close() wantLines := split(readFile) haveLines := split(bytes.NewBuffer(haveB)) for i, want := range wantLines { @@ -109,6 +110,7 @@ func TestJsonLogging(t *testing.T) { if err != nil { t.Fatal(err) } + defer readFile.Close() wantLines := split(readFile) haveLines := split(bytes.NewBuffer(haveB)) for i, wantLine := range wantLines { diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 192c850868c8..cf7093e60560 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -91,7 +91,7 @@ data, and verifies that all snapshot storage data has a corresponding account. }, { Name: "inspect-account", - Usage: "Check all snapshot layers for the a specific account", + Usage: "Check all snapshot layers for the specific account", ArgsUsage: "
", Action: checkAccount, Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json index bee0e66dd8e5..31a34de6beb2 100644 --- a/cmd/geth/testdata/vcheck/vulnerabilities.json +++ b/cmd/geth/testdata/vcheck/vulnerabilities.json @@ -166,5 +166,37 @@ "severity": "Low", "CVE": "CVE-2022-29177", "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16)-.*)$" + }, + { + "name": "DoS via malicious p2p message", + "uid": "GETH-2023-01", + "summary": "A vulnerable node can be made to consume unbounded amounts of memory when handling specially crafted p2p messages sent from an attacker node.", + "description": "The p2p handler spawned a new goroutine to respond to ping requests. By flooding a node with ping requests, an unbounded number of goroutines can be created, leading to resource exhaustion and potentially crash due to OOM.", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-ppjg-v974-84cm", + "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" + ], + "introduced": "v1.10.0", + "fixed": "v1.12.1", + "published": "2023-09-06", + "severity": "High", + "CVE": "CVE-2023-40591", + "check": "(Geth\\/v1\\.(10|11)\\..*)|(Geth\\/v1\\.12\\.0-.*)$" + }, + { + "name": "DoS via malicious p2p message", + "uid": "GETH-2024-01", + "summary": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node.", + "description": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node. 
Full details will be available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652)", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652", + "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities" + ], + "introduced": "v1.10.0", + "fixed": "v1.13.15", + "published": "2024-05-06", + "severity": "High", + "CVE": "CVE-2024-32972", + "check": "(Geth\\/v1\\.(10|11|12)\\..*)|(Geth\\/v1\\.13\\.\\d-.*)|(Geth\\/v1\\.13\\.1(0|1|2|3|4)-.*)$" } ] diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index ff3931356e8f..9eb37fb5a875 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/urfave/cli/v2" ) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e1c33678be37..ecf6acc18606 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1872,13 +1872,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { Fatalf("Could not read genesis from database: %v", err) } if !genesis.Config.TerminalTotalDifficultyPassed { - Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode") + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true") } if genesis.Config.TerminalTotalDifficulty == nil { - Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.") + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified") + } else if genesis.Config.TerminalTotalDifficulty.Cmp(big.NewInt(0)) != 0 { + Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be 0") } - if 
genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 { - Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty") + if genesis.Difficulty.Cmp(big.NewInt(0)) != 0 { + Fatalf("Bad developer-mode genesis configuration: difficulty must be 0") } } chaindb.Close() diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go index b6703c59ed32..a631eaf49036 100644 --- a/cmd/utils/history_test.go +++ b/cmd/utils/history_test.go @@ -162,8 +162,7 @@ func TestHistoryImportAndExport(t *testing.T) { } // Now import Era. - freezer := t.TempDir() - db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { panic(err) } diff --git a/common/math/big_test.go b/common/math/big_test.go index 803b5e1cc617..ee8f09e7b45f 100644 --- a/common/math/big_test.go +++ b/common/math/big_test.go @@ -180,9 +180,9 @@ func BenchmarkByteAtOld(b *testing.B) { func TestReadBits(t *testing.T) { check := func(input string) { want, _ := hex.DecodeString(input) - int, _ := new(big.Int).SetString(input, 16) + n, _ := new(big.Int).SetString(input, 16) buf := make([]byte, len(want)) - ReadBits(int, buf) + ReadBits(n, buf) if !bytes.Equal(buf, want) { t.Errorf("have: %x\nwant: %x", buf, want) } diff --git a/common/math/integer.go b/common/math/integer.go index da01c0a08e00..080fba8fea89 100644 --- a/common/math/integer.go +++ b/common/math/integer.go @@ -54,11 +54,11 @@ func (i *HexOrDecimal64) UnmarshalJSON(input []byte) error { // UnmarshalText implements encoding.TextUnmarshaler. 
func (i *HexOrDecimal64) UnmarshalText(input []byte) error { - int, ok := ParseUint64(string(input)) + n, ok := ParseUint64(string(input)) if !ok { return fmt.Errorf("invalid hex or decimal integer %q", input) } - *i = HexOrDecimal64(int) + *i = HexOrDecimal64(n) return nil } diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 4e3fbeb09a7c..b8946e0c7109 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -388,7 +388,7 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(true) // Assemble and return the final block. - return types.NewBlockWithWithdrawals(header, body.Transactions, body.Uncles, receipts, body.Withdrawals, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), nil } // Seal generates a new sealing request for the given input block and pushes diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index b5727fc666d5..c9e94840020a 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -597,7 +597,7 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header * header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Assemble and return the final block for sealing. 
- return types.NewBlock(header, body.Transactions, nil, receipts, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 9800bf928882..b5e2754c2d94 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -520,7 +520,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock(header, body.Transactions, body.Uncles, receipts, trie.NewStackTrie(nil)), nil + return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), nil } // SealHash returns the hash of a block prior to it being sealed. 
diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 2f86b2d751b8..c573ef91faca 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -154,12 +154,10 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) { preHeaders := make([]*types.Header, len(preBlocks)) for i, block := range preBlocks { preHeaders[i] = block.Header() - t.Logf("Pre-merge header: %d", block.NumberU64()) } postHeaders := make([]*types.Header, len(postBlocks)) for i, block := range postBlocks { postHeaders[i] = block.Header() - t.Logf("Post-merge header: %d", block.NumberU64()) } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) diff --git a/core/blockchain.go b/core/blockchain.go index e4c89668245f..7c8ab3abc44a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -100,7 +100,6 @@ const ( blockCacheLimit = 256 receiptsCacheLimit = 32 txLookupCacheLimit = 1024 - TriesInMemory = 128 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. // @@ -1128,7 +1127,7 @@ func (bc *BlockChain) Stop() { if !bc.cacheConfig.TrieDirtyDisabled { triedb := bc.triedb - for _, offset := range []uint64{0, 1, TriesInMemory - 1} { + for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} { if number := bc.CurrentBlock().Number.Uint64(); number > offset { recent := bc.GetBlockByNumber(number - offset) @@ -1309,7 +1308,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Delete block data from the main database. 
var ( batch = bc.db.NewBatch() - canonHashes = make(map[common.Hash]struct{}) + canonHashes = make(map[common.Hash]struct{}, len(blockChain)) ) for _, block := range blockChain { canonHashes[block.Hash()] = struct{}{} @@ -1452,7 +1451,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error { // writeBlockWithState writes block, metadata and corresponding state data to the // database. -func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error { +func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error { // Calculate the total difficulty of the block ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) if ptd == nil { @@ -1469,12 +1468,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) rawdb.WriteBlock(blockBatch, block) rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) - rawdb.WritePreimages(blockBatch, state.Preimages()) + rawdb.WritePreimages(blockBatch, statedb.Preimages()) if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } // Commit all cached state changes into underlying memory database. - root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) + root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) if err != nil { return err } @@ -1493,7 +1492,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // Flush limits are not considered for the first TriesInMemory blocks. 
current := block.NumberU64() - if current <= TriesInMemory { + if current <= state.TriesInMemory { return nil } // If we exceeded our memory allowance, flush matured singleton nodes to disk @@ -1505,7 +1504,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.triedb.Cap(limit - ethdb.IdealBatchSize) } // Find the next state trie we need to commit - chosen := current - TriesInMemory + chosen := current - state.TriesInMemory flushInterval := time.Duration(bc.flushInterval.Load()) // If we exceeded time allowance, flush an entire trie to disk if bc.gcproc > flushInterval { @@ -1517,8 +1516,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } else { // If we're exceeding limits but haven't reached a large enough memory gap, // warn the user that the system is becoming unstable. - if chosen < bc.lastWrite+TriesInMemory && bc.gcproc >= 2*flushInterval { - log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory) + if chosen < bc.lastWrite+state.TriesInMemory && bc.gcproc >= 2*flushInterval { + log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/state.TriesInMemory) } // Flush an entire trie and restart the counters bc.triedb.Commit(header.Root, true) @@ -1806,8 +1805,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) } statedb.SetLogger(bc.logger) - // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + // If we are past Byzantium, enable prefetching to pull in trie node paths + // while processing transactions. Before Byzantium the prefetcher is mostly + // useless due to the intermediate root hashing after each transaction. 
+ if bc.chainConfig.IsByzantium(block.Number()) { + statedb.StartPrefetcher("chain") + } activeState = statedb // If we have a followup block, run that against the current state to pre-cache @@ -1963,7 +1966,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them - blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits) + blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) blockInsertTimer.UpdateSince(start) return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil diff --git a/core/blockchain_test.go b/core/blockchain_test.go index f20252da8c2a..e4bc3e09a657 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -785,7 +785,7 @@ func testFastVsFullChains(t *testing.T, scheme string) { t.Fatalf("failed to insert receipt %d: %v", n, err) } // Freezer style fast import the chain. - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -875,12 +875,12 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } ) - height := uint64(1024) + height := uint64(64) _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) // makeDb creates a db instance for testing. 
makeDb := func() ethdb.Database { - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1712,7 +1712,7 @@ func TestTrieForkGC(t *testing.T) { Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee), } - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) // Generate a bunch of fork blocks, each side forking from the canonical chain forks := make([]*types.Block, len(blocks)) @@ -1740,7 +1740,7 @@ func TestTrieForkGC(t *testing.T) { } } // Dereference all the recent tries and ensure no past trie is left in - for i := 0; i < TriesInMemory; i++ { + for i := 0; i < state.TriesInMemory; i++ { chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } @@ -1764,11 +1764,11 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) + original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) + competitor, _ := GenerateChain(genesis.Config, 
shared[len(shared)-1], engine, genDb, 2*state.TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original canonical one - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer db.Close() chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) @@ -1804,7 +1804,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := TriesInMemory + states := state.TriesInMemory if scheme == rawdb.PathScheme { states = states + 1 } @@ -1833,7 +1833,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) { funds = big.NewInt(1000000000) gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}} ) - height := uint64(1024) + height := uint64(64) _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) // Import the chain as a ancient-first node and ensure all pointers are updated @@ -1908,7 +1908,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) { } // Set up a BlockChain that uses the ancient store. 
- ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -1972,13 +1972,13 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // We must use a pretty long chain to ensure that the fork doesn't overtake us // until after at least 128 blocks post tip - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) b.OffsetTime(-9) }) // Import the canonical chain - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer diskdb.Close() chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) @@ -1992,7 +1992,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) { } // Generate fork chain, starting from an early block parent := blocks[10] - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) @@ -2055,7 +2055,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Set the terminal total difficulty in the config gspec.Config.TerminalTotalDifficulty = big.NewInt(0) } - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*state.TriesInMemory, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(nonce, 
common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key) if err != nil { t.Fatalf("failed to create tx: %v", err) @@ -2070,9 +2070,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - lastPrunedIndex := len(blocks) - TriesInMemory - 1 + lastPrunedIndex := len(blocks) - state.TriesInMemory - 1 lastPrunedBlock := blocks[lastPrunedIndex] - firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory] + firstNonPrunedBlock := blocks[len(blocks)-state.TriesInMemory] // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { @@ -2099,7 +2099,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Generate fork chain, make it longer than canon parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock parent := blocks[parentIndex] - fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*state.TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) if int(b.header.Number.Uint64()) >= mergeBlock { b.SetPoS() @@ -2190,7 +2190,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) { b.OffsetTime(-9) // A higher difficulty }) // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2361,7 +2361,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i } }) // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), 
t.TempDir(), "", false) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { t.Fatalf("failed to create temp freezer db: %v", err) } @@ -2742,7 +2742,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { BaseFee: big.NewInt(params.InitialBaseFee), } // Generate and import the canonical chain - _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) + _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { @@ -2755,9 +2755,9 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) { } // In path-based trie database implementation, it will keep 128 diff + 1 disk // layers, totally 129 latest states available. In hash-based it's 128. - states := TriesInMemory + states := state.TriesInMemory if scheme == rawdb.PathScheme { - states = TriesInMemory + 1 + states = state.TriesInMemory + 1 } lastPrunedIndex := len(blocks) - states - 1 lastPrunedBlock := blocks[lastPrunedIndex] @@ -3634,18 +3634,19 @@ func testSetCanonical(t *testing.T, scheme string) { Alloc: types.GenesisAlloc{address: {Balance: funds}}, BaseFee: big.NewInt(params.InitialBaseFee), } - signer = types.LatestSigner(gspec.Config) - engine = ethash.NewFaker() + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() + chainLength = 10 ) // Generate and import the canonical chain - _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + _, canon, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) } gen.AddTx(tx) }) - diskdb, _ := 
rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) defer diskdb.Close() chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) @@ -3659,7 +3660,7 @@ func testSetCanonical(t *testing.T, scheme string) { } // Generate the side chain and import them - _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { + _, side, _ := GenerateChainWithGenesis(gspec, engine, chainLength, func(i int, gen *BlockGen) { tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key) if err != nil { panic(err) @@ -3698,8 +3699,8 @@ func testSetCanonical(t *testing.T, scheme string) { verify(side[len(side)-1]) // Reset the chain head to original chain - chain.SetCanonical(canon[TriesInMemory-1]) - verify(canon[TriesInMemory-1]) + chain.SetCanonical(canon[chainLength-1]) + verify(canon[chainLength-1]) } // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted diff --git a/core/bloombits/scheduler.go b/core/bloombits/scheduler.go index 6449c7465a17..a523bc55ab49 100644 --- a/core/bloombits/scheduler.go +++ b/core/bloombits/scheduler.go @@ -23,7 +23,7 @@ import ( // request represents a bloom retrieval task to prioritize and pull from the local // database or remotely from the network. 
type request struct { - section uint64 // Section index to retrieve the a bit-vector from + section uint64 // Section index to retrieve the bit-vector from bit uint // Bit index within the section to retrieve the vector of } diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index f09960901558..bf3bde756cb9 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -228,7 +228,7 @@ func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Heade b.t.Error("Unexpected call to Process") // Can't use Fatal since this is not the test's goroutine. // Returning error stops the chainIndexer's updateLoop - return errors.New("Unexpected call to Process") + return errors.New("unexpected call to Process") case b.processCh <- header.Number.Uint64(): } return nil diff --git a/core/chain_makers.go b/core/chain_makers.go index 13d7cb86c043..58985347bb31 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index a2ec9e6507d4..6241f3fb6960 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -43,12 +43,11 @@ func TestGeneratePOSChain(t *testing.T) { bb = common.Address{0xbb} funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether)) config = *params.AllEthashProtocolChanges - asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500") gspec = &Genesis{ Config: &config, Alloc: types.GenesisAlloc{ address: {Balance: funds}, - params.BeaconRootsAddress: {Balance: common.Big0, Code: asm4788}, + params.BeaconRootsAddress: 
{Code: params.BeaconRootsCode}, }, BaseFee: big.NewInt(params.InitialBaseFee), Difficulty: common.Big1, diff --git a/core/error.go b/core/error.go index e6e6ba2f90c3..161538fe4323 100644 --- a/core/error.go +++ b/core/error.go @@ -64,6 +64,11 @@ var ( // than init code size limit. ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientBalanceWitness is returned if the transaction sender has enough + // funds to cover the transfer, but not enough to pay for witness access/modification + // costs for the transaction + ErrInsufficientBalanceWitness = errors.New("insufficient funds to cover witness access costs for transaction") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/genesis.go b/core/genesis.go index f05e84199ae4..f1b4490f2e22 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -476,7 +476,7 @@ func (g *Genesis) ToBlock() *types.Block { } } } - return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals) + return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil)) } // Commit writes the block and state of a genesis specification to the database. 
@@ -593,6 +593,8 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis { common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b + // Pre-deploy EIP-4788 system contract + params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode}, }, } if faucet != nil { diff --git a/core/genesis_test.go b/core/genesis_test.go index 61be0bd252c6..ab408327d4e6 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -304,7 +304,7 @@ func TestVerkleGenesisCommit(t *testing.T) { }, } - expected := common.Hex2Bytes("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b") + expected := common.FromHex("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b") got := genesis.ToBlock().Root().Bytes() if !bytes.Equal(got, expected) { t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) @@ -314,7 +314,7 @@ func TestVerkleGenesisCommit(t *testing.T) { triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults}) block := genesis.MustCommit(db, triedb) if !bytes.Equal(block.Root().Bytes(), expected) { - t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) + t.Fatalf("invalid genesis state root, expected %x, got %x", expected, block.Root()) } // Test that the trie is verkle @@ -322,7 +322,7 @@ func TestVerkleGenesisCommit(t *testing.T) { t.Fatalf("expected trie to be verkle") } - if !rawdb.ExistsAccountTrieNode(db, nil) { + if !rawdb.HasAccountTrieNode(db, nil) { t.Fatal("could not find node") } } diff --git a/core/mkalloc.go b/core/mkalloc.go index 201c2fe7de8d..cc4955f0383c 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -101,6 +101,7 @@ func main() { if err != nil { panic(err) } + defer file.Close() if err := json.NewDecoder(file).Decode(g); err != nil { panic(err) } diff --git 
a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 5a4af5bb877b..025be7ade7f4 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -753,7 +753,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block { if body == nil { return nil } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithWithdrawals(body.Withdrawals) + return types.NewBlockWithHeader(header).WithBody(*body) } // WriteBlock serializes a block into the database, header and body separately. @@ -843,7 +843,11 @@ func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block { } for _, bad := range badBlocks { if bad.Header.Hash() == hash { - return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals) + block := types.NewBlockWithHeader(bad.Header) + if bad.Body != nil { + block = block.WithBody(*bad.Body) + } + return block } } return nil @@ -862,7 +866,11 @@ func ReadAllBadBlocks(db ethdb.Reader) []*types.Block { } var blocks []*types.Block for _, bad := range badBlocks { - blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals)) + block := types.NewBlockWithHeader(bad.Header) + if bad.Body != nil { + block = block.WithBody(*bad.Body) + } + blocks = append(blocks, block) } return blocks } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index a7ceb72998a1..fdc940b57e66 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -640,7 +640,7 @@ func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block { Number: big.NewInt(int64(i)), Extra: []byte("test block"), } - blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil) + blocks[i] = types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) blocks[i].Hash() // pre-cache the block hash } 
return blocks diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 124389ba7a13..78dba000fcef 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -76,7 +76,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, &types.Body{Transactions: txs}, nil, newTestHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index e34b24fd7661..44eb715d04e2 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "golang.org/x/crypto/sha3" ) // HashScheme is the legacy hash-based state scheme with which trie nodes are @@ -50,7 +49,7 @@ const PathScheme = "path" type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { @@ -65,33 +64,15 @@ func (h *hasher) release() { hasherPool.Put(h) } -// ReadAccountTrieNode retrieves the account trie node and the associated node -// hash with the specified node path. 
-func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { - data, err := db.Get(accountTrieNodeKey(path)) - if err != nil { - return nil, common.Hash{} - } - h := newHasher() - defer h.release() - return data, h.hash(data) -} - -// HasAccountTrieNode checks the account trie node presence with the specified -// node path and the associated node hash. -func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { - data, err := db.Get(accountTrieNodeKey(path)) - if err != nil { - return false - } - h := newHasher() - defer h.release() - return h.hash(data) == hash +// ReadAccountTrieNode retrieves the account trie node with the specified node path. +func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) []byte { + data, _ := db.Get(accountTrieNodeKey(path)) + return data } -// ExistsAccountTrieNode checks the presence of the account trie node with the +// HasAccountTrieNode checks the presence of the account trie node with the // specified node path, regardless of the node hash. -func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { +func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { has, err := db.Has(accountTrieNodeKey(path)) if err != nil { return false @@ -113,33 +94,15 @@ func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { } } -// ReadStorageTrieNode retrieves the storage trie node and the associated node -// hash with the specified node path. -func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { - data, err := db.Get(storageTrieNodeKey(accountHash, path)) - if err != nil { - return nil, common.Hash{} - } - h := newHasher() - defer h.release() - return data, h.hash(data) -} - -// HasStorageTrieNode checks the storage trie node presence with the provided -// node path and the associated node hash. 
-func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { - data, err := db.Get(storageTrieNodeKey(accountHash, path)) - if err != nil { - return false - } - h := newHasher() - defer h.release() - return h.hash(data) == hash +// ReadStorageTrieNode retrieves the storage trie node with the specified node path. +func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) []byte { + data, _ := db.Get(storageTrieNodeKey(accountHash, path)) + return data } -// ExistsStorageTrieNode checks the presence of the storage trie node with the +// HasStorageTrieNode checks the presence of the storage trie node with the // specified account hash and node path, regardless of the node hash. -func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { +func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { has, err := db.Has(storageTrieNodeKey(accountHash, path)) if err != nil { return false @@ -198,10 +161,18 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c case HashScheme: return HasLegacyTrieNode(db, hash) case PathScheme: + var blob []byte if owner == (common.Hash{}) { - return HasAccountTrieNode(db, path, hash) + blob = ReadAccountTrieNode(db, path) + } else { + blob = ReadStorageTrieNode(db, owner, path) } - return HasStorageTrieNode(db, owner, path, hash) + if len(blob) == 0 { + return false + } + h := newHasher() + defer h.release() + return h.hash(blob) == hash // exists but not match default: panic(fmt.Sprintf("Unknown scheme %v", scheme)) } @@ -209,43 +180,35 @@ func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash c // ReadTrieNode retrieves the trie node from database with the provided node info // and associated node hash. 
-// hashScheme-based lookup requires the following: -// - hash -// -// pathScheme-based lookup requires the following: -// - owner -// - path func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { switch scheme { case HashScheme: return ReadLegacyTrieNode(db, hash) case PathScheme: - var ( - blob []byte - nHash common.Hash - ) + var blob []byte if owner == (common.Hash{}) { - blob, nHash = ReadAccountTrieNode(db, path) + blob = ReadAccountTrieNode(db, path) } else { - blob, nHash = ReadStorageTrieNode(db, owner, path) + blob = ReadStorageTrieNode(db, owner, path) } - if nHash != hash { + if len(blob) == 0 { return nil } + h := newHasher() + defer h.release() + if h.hash(blob) != hash { + return nil // exists but not match + } return blob default: panic(fmt.Sprintf("Unknown scheme %v", scheme)) } } -// WriteTrieNode writes the trie node into database with the provided node info -// and associated node hash. -// hashScheme-based lookup requires the following: -// - hash +// WriteTrieNode writes the trie node into database with the provided node info. // -// pathScheme-based lookup requires the following: -// - owner -// - path +// hash-scheme requires the node hash as the identifier. +// path-scheme requires the node owner and path as the identifier. func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { switch scheme { case HashScheme: @@ -261,14 +224,10 @@ func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash } } -// DeleteTrieNode deletes the trie node from database with the provided node info -// and associated node hash. -// hashScheme-based lookup requires the following: -// - hash +// DeleteTrieNode deletes the trie node from database with the provided node info. // -// pathScheme-based lookup requires the following: -// - owner -// - path +// hash-scheme requires the node hash as the identifier. 
+// path-scheme requires the node owner and path as the identifier. func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { switch scheme { case HashScheme: @@ -287,9 +246,8 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has // ReadStateScheme reads the state scheme of persistent state, or none // if the state is not present in database. func ReadStateScheme(db ethdb.Reader) string { - // Check if state in path-based scheme is present - blob, _ := ReadAccountTrieNode(db, nil) - if len(blob) != 0 { + // Check if state in path-based scheme is present. + if HasAccountTrieNode(db, nil) { return PathScheme } // The root node might be deleted during the initial snap sync, check @@ -304,8 +262,7 @@ func ReadStateScheme(db ethdb.Reader) string { if header == nil { return "" // empty datadir } - blob = ReadLegacyTrieNode(db, header.Root) - if len(blob) == 0 { + if !HasLegacyTrieNode(db, header.Root) { return "" // no state in disk } return HashScheme diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index e88867af0e64..44867ded04ab 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -16,7 +16,11 @@ package rawdb -import "path/filepath" +import ( + "path/filepath" + + "github.com/ethereum/go-ethereum/ethdb" +) // The list of table names of chain freezer. const ( @@ -75,7 +79,15 @@ var ( // freezers the collections of all builtin freezers. var freezers = []string{ChainFreezerName, StateFreezerName} -// NewStateFreezer initializes the freezer for state history. -func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) { - return NewResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) +// NewStateFreezer initializes the ancient store for state history. 
+// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). +// - if non-empty directory is given, initializes the regular file-based +// state freezer. +func NewStateFreezer(ancientDir string, readOnly bool) (ethdb.ResettableAncientStore, error) { + if ancientDir == "" { + return NewMemoryFreezer(readOnly, stateFreezerNoSnappy), nil + } + return newResettableFreezer(filepath.Join(ancientDir, StateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) } diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 428cda544b03..1c69639c9d04 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -89,20 +89,17 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case StateFreezerName: - if ReadStateScheme(db) != PathScheme { - continue - } datadir, err := db.AncientDatadir() if err != nil { return nil, err } f, err := NewStateFreezer(datadir, true) if err != nil { - return nil, err + continue // might be possible the state freezer is not existent } defer f.Close() - info, err := inspect(StateFreezerName, stateFreezerNoSnappy, f) + info, err := inspect(freezer, stateFreezerNoSnappy, f) if err != nil { return nil, err } diff --git a/core/rawdb/ancienttest/testsuite.go b/core/rawdb/ancienttest/testsuite.go new file mode 100644 index 000000000000..70de263c0435 --- /dev/null +++ b/core/rawdb/ancienttest/testsuite.go @@ -0,0 +1,325 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ancienttest + +import ( + "bytes" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/testrand" +) + +// TestAncientSuite runs a suite of tests against an ancient database +// implementation. +func TestAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + // Test basic read methods + t.Run("BasicRead", func(t *testing.T) { basicRead(t, newFn) }) + + // Test batch read method + t.Run("BatchRead", func(t *testing.T) { batchRead(t, newFn) }) + + // Test basic write methods + t.Run("BasicWrite", func(t *testing.T) { basicWrite(t, newFn) }) + + // Test if data mutation is allowed after db write + t.Run("nonMutable", func(t *testing.T) { nonMutable(t, newFn) }) +} + +func basicRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < len(data); i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test basic tail and head retrievals + tail, err := db.Tail() + if err != nil || tail != 10 { + t.Fatal("Failed to retrieve tail") + } + ancient, err := db.Ancients() + if err != nil || ancient != 90 { + t.Fatal("Failed to retrieve ancient") + } + + // Test the deleted items shouldn't be reachable + var cases = []struct { + start int + limit int + }{ + {0, 10}, + {90, 100}, + } + for _, c := range cases { + for i := c.start; i < c.limit; i++ { + 
exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatalf("Item %d is already truncated", uint64(i)) + } + _, err = db.Ancient("a", uint64(i)) + if err == nil { + t.Fatal("Error is expected for non-existent item") + } + } + } + + // Test the items in range should be reachable + for i := 10; i < 90; i++ { + exist, err := db.HasAncient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if !exist { + t.Fatalf("Item %d is missing", uint64(i)) + } + blob, err := db.Ancient("a", uint64(i)) + if err != nil { + t.Fatalf("Failed to retrieve item, %v", err) + } + if !bytes.Equal(blob, data[i]) { + t.Fatalf("Unexpected item content, want: %v, got: %v", data[i], blob) + } + } + + // Test the items in unknown table shouldn't be reachable + exist, err := db.HasAncient("b", uint64(0)) + if err != nil { + t.Fatalf("Failed to check presence, %v", err) + } + if exist { + t.Fatal("Item in unknown table shouldn't be found") + } + _, err = db.Ancient("b", uint64(0)) + if err == nil { + t.Fatal("Error is expected for unknown table") + } +} + +func batchRead(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Test the items in range should be reachable + var cases = []struct { + start uint64 + count uint64 + maxSize uint64 + expStart int + expLimit int + }{ + // Items in range [10, 90) with no size limitation + { + 10, 80, 0, 10, 90, + }, + // Items in range [10, 90) with 32 size cap, single item is expected + { + 10, 80, 32, 10, 11, + }, + // Items in range [10, 90) with 31 size cap, single item is expected + { + 10, 80, 31, 10, 11, + }, + // Items in range [10, 90) with 
32*80 size cap, all items are expected + { + 10, 80, 32 * 80, 10, 90, + }, + // Extra items above the last item are not returned + { + 10, 90, 0, 10, 90, + }, + } + for i, c := range cases { + batch, err := db.AncientRange("a", c.start, c.count, c.maxSize) + if err != nil { + t.Fatalf("Failed to retrieve item in range, %v", err) + } + if !reflect.DeepEqual(batch, data[c.expStart:c.expLimit]) { + t.Fatalf("Case %d, Batch content is not matched", i) + } + } + + // Test out-of-range / zero-size retrieval should be rejected + _, err := db.AncientRange("a", 0, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 90, 1, 0) + if err == nil { + t.Fatal("Out-of-range retrieval should be rejected") + } + _, err = db.AncientRange("a", 10, 0, 0) + if err == nil { + t.Fatal("Zero-size retrieval should be rejected") + } + + // Test item in unknown table shouldn't be reachable + _, err = db.AncientRange("b", 10, 1, 0) + if err == nil { + t.Fatal("Item in unknown table shouldn't be found") + } +} + +func basicWrite(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + var ( + db = newFn([]string{"a", "b"}) + dataA = makeDataset(100, 32) + dataB = makeDataset(100, 32) + ) + defer db.Close() + + // The ancient write to tables should be aligned + _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + } + return nil + }) + if err == nil { + t.Fatal("Unaligned ancient write should be rejected") + } + + // Test normal ancient write + size, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + wantSize := int64(6400) + if size != wantSize { + t.Fatalf("Ancient write size is not expected, want: %d, got: %d", wantSize, 
size) + } + + // Write should work after head truncating + db.TruncateHead(90) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 90; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } + + // Write should work after truncating everything + db.TruncateTail(0) + _, err = db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), dataA[i]) + op.AppendRaw("b", uint64(i), dataB[i]) + } + return nil + }) + if err != nil { + t.Fatalf("Failed to write ancient data %v", err) + } +} + +func nonMutable(t *testing.T, newFn func(kinds []string) ethdb.AncientStore) { + db := newFn([]string{"a"}) + defer db.Close() + + // We write 100 zero-bytes to the freezer and immediately mutate the slice + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + data := make([]byte, 100) + op.AppendRaw("a", uint64(0), data) + for i := range data { + data[i] = 0xff + } + return nil + }) + // Now read it. + data, err := db.Ancient("a", uint64(0)) + if err != nil { + t.Fatal(err) + } + for k, v := range data { + if v != 0 { + t.Fatalf("byte %d != 0: %x", k, v) + } + } +} + +// TestResettableAncientSuite runs a suite of tests against a resettable ancient +// database implementation. 
+func TestResettableAncientSuite(t *testing.T, newFn func(kinds []string) ethdb.ResettableAncientStore) { + t.Run("Reset", func(t *testing.T) { + var ( + db = newFn([]string{"a"}) + data = makeDataset(100, 32) + ) + defer db.Close() + + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + db.TruncateTail(10) + db.TruncateHead(90) + + // Ancient write should work after resetting + db.Reset() + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + for i := 0; i < 100; i++ { + op.AppendRaw("a", uint64(i), data[i]) + } + return nil + }) + }) +} + +func makeDataset(size, value int) [][]byte { + var vals [][]byte + for i := 0; i < size; i += 1 { + vals = append(vals, testrand.Bytes(value)) + } + return vals +} diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index d8214874bdb8..7a0b819b6fa0 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -39,26 +39,40 @@ const ( freezerBatchLimit = 30000 ) -// chainFreezer is a wrapper of freezer with additional chain freezing feature. -// The background thread will keep moving ancient chain segments from key-value -// database to flat files for saving space on live database. +// chainFreezer is a wrapper of chain ancient store with additional chain freezing +// feature. The background thread will keep moving ancient chain segments from +// key-value database to flat files for saving space on live database. type chainFreezer struct { - *Freezer + ethdb.AncientStore // Ancient store for storing cold chain segment + quit chan struct{} wg sync.WaitGroup trigger chan chan struct{} // Manual blocking freeze trigger, test determinism } -// newChainFreezer initializes the freezer for ancient chain data. +// newChainFreezer initializes the freezer for ancient chain segment. +// +// - if the empty directory is given, initializes the pure in-memory +// state freezer (e.g. dev mode). 
+// - if non-empty directory is given, initializes the regular file-based +// state freezer. func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) { - freezer, err := NewChainFreezer(datadir, namespace, readonly) + var ( + err error + freezer ethdb.AncientStore + ) + if datadir == "" { + freezer = NewMemoryFreezer(readonly, chainFreezerNoSnappy) + } else { + freezer, err = NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) + } if err != nil { return nil, err } return &chainFreezer{ - Freezer: freezer, - quit: make(chan struct{}), - trigger: make(chan chan struct{}), + AncientStore: freezer, + quit: make(chan struct{}), + trigger: make(chan chan struct{}), }, nil } @@ -70,7 +84,7 @@ func (f *chainFreezer) Close() error { close(f.quit) } f.wg.Wait() - return f.Freezer.Close() + return f.AncientStore.Close() } // readHeadNumber returns the number of chain head block. 0 is returned if the @@ -167,7 +181,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { log.Debug("Current full block not old enough to freeze", "err", err) continue } - frozen := f.frozen.Load() + frozen, _ := f.Ancients() // no error will occur, safe to ignore // Short circuit if the blocks below threshold are already frozen. 
if frozen != 0 && frozen-1 >= threshold { @@ -190,7 +204,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { backoff = true continue } - // Batch of blocks have been frozen, flush them before wiping from leveldb + // Batch of blocks have been frozen, flush them before wiping from key-value store if err := f.Sync(); err != nil { log.Crit("Failed to flush frozen tables", "err", err) } @@ -210,7 +224,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) { // Wipe out side chains also and track dangling side chains var dangling []common.Hash - frozen = f.frozen.Load() // Needs reload after during freezeRange + frozen, _ = f.Ancients() // Needs reload after during freezeRange for number := first; number < frozen; number++ { // Always keep the genesis block in active database if number != 0 { diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 78b0a82e10fe..390424f673fc 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -111,7 +111,7 @@ func 
TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -138,7 +138,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, &types.Body{Transactions: types.Transactions{tx}}, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 7b2c0415cbbf..3436958de735 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -34,11 +34,13 @@ import ( "github.com/olekukonko/tablewriter" ) -// freezerdb is a database wrapper that enables freezer data retrievals. +// freezerdb is a database wrapper that enables ancient chain segment freezing. type freezerdb struct { - ancientRoot string ethdb.KeyValueStore - ethdb.AncientStore + *chainFreezer + + readOnly bool + ancientRoot string } // AncientDatadir returns the path of root ancient directory. @@ -50,7 +52,7 @@ func (frdb *freezerdb) AncientDatadir() (string, error) { // the slow ancient tables. func (frdb *freezerdb) Close() error { var errs []error - if err := frdb.AncientStore.Close(); err != nil { + if err := frdb.chainFreezer.Close(); err != nil { errs = append(errs, err) } if err := frdb.KeyValueStore.Close(); err != nil { @@ -66,12 +68,12 @@ func (frdb *freezerdb) Close() error { // a freeze cycle completes, without having to sleep for a minute to trigger the // automatic background run. 
func (frdb *freezerdb) Freeze() error { - if frdb.AncientStore.(*chainFreezer).readonly { + if frdb.readOnly { return errReadOnly } // Trigger a freeze cycle and block until it's done trigger := make(chan struct{}, 1) - frdb.AncientStore.(*chainFreezer).trigger <- trigger + frdb.chainFreezer.trigger <- trigger <-trigger return nil } @@ -192,8 +194,14 @@ func resolveChainFreezerDir(ancient string) string { // storage. The passed ancient indicates the path of root ancient directory // where the chain freezer can be opened. func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) { - // Create the idle freezer instance - frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly) + // Create the idle freezer instance. If the given ancient directory is empty, + // in-memory chain freezer is used (e.g. dev mode); otherwise the regular + // file-based freezer is created. + chainFreezerDir := ancient + if chainFreezerDir != "" { + chainFreezerDir = resolveChainFreezerDir(chainFreezerDir) + } + frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly) if err != nil { printChainMetadata(db) return nil, err @@ -277,7 +285,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st } } // Freezer is consistent with the key-value database, permit combining the two - if !frdb.readonly { + if !readonly { frdb.wg.Add(1) go func() { frdb.freeze(db) @@ -287,7 +295,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st return &freezerdb{ ancientRoot: ancient, KeyValueStore: db, - AncientStore: frdb, + chainFreezer: frdb, }, nil } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index b7824ddc0d2c..0f28782db9ee 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -62,7 +62,7 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000 // reserving it for go-ethereum. 
This would also reduce the memory requirements // of Geth, and thus also GC overhead. type Freezer struct { - frozen atomic.Uint64 // Number of blocks already frozen + frozen atomic.Uint64 // Number of items already frozen tail atomic.Uint64 // Number of the first stored item in the freezer // This lock synchronizes writers and the truncate operation, as well as @@ -76,12 +76,6 @@ type Freezer struct { closeOnce sync.Once } -// NewChainFreezer is a small utility method around NewFreezer that sets the -// default parameters for the chain storage. -func NewChainFreezer(datadir string, namespace string, readonly bool) (*Freezer, error) { - return NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) -} - // NewFreezer creates a freezer instance for maintaining immutable ordered // data according to the given parameters. // diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go new file mode 100644 index 000000000000..954b58e8747b --- /dev/null +++ b/core/rawdb/freezer_memory.go @@ -0,0 +1,428 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package rawdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// memoryTable is used to store a list of sequential items in memory. +type memoryTable struct { + name string // Table name + items uint64 // Number of stored items in the table, including the deleted ones + offset uint64 // Number of deleted items from the table + data [][]byte // List of rlp-encoded items, sort in order + size uint64 // Total memory size occupied by the table + lock sync.RWMutex +} + +// newMemoryTable initializes the memory table. +func newMemoryTable(name string) *memoryTable { + return &memoryTable{name: name} +} + +// has returns an indicator whether the specified data exists. +func (t *memoryTable) has(number uint64) bool { + t.lock.RLock() + defer t.lock.RUnlock() + + return number >= t.offset && number < t.items +} + +// retrieve retrieves multiple items in sequence, starting from the index 'start'. +// It will return: +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present +func (t *memoryTable) retrieve(start uint64, count, maxBytes uint64) ([][]byte, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + var ( + size uint64 + batch [][]byte + ) + // Ensure the start is written, not deleted from the tail, and that the + // caller actually wants something. + if t.items <= start || t.offset > start || count == 0 { + return nil, errOutOfBounds + } + // Cap the item count if the retrieval is out of bound. 
+ if start+count > t.items { + count = t.items - start + } + for n := start; n < start+count; n++ { + index := n - t.offset + if len(batch) != 0 && maxBytes != 0 && size+uint64(len(t.data[index])) > maxBytes { + return batch, nil + } + batch = append(batch, t.data[index]) + size += uint64(len(t.data[index])) + } + return batch, nil +} + +// truncateHead discards any recent data above the provided threshold number. +func (t *memoryTable) truncateHead(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete. + if t.items <= items { + return nil + } + if items < t.offset { + return errors.New("truncation below tail") + } + t.data = t.data[:items-t.offset] + t.items = items + return nil +} + +// truncateTail discards any recent data before the provided threshold number. +func (t *memoryTable) truncateTail(items uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Short circuit if nothing to delete. + if t.offset >= items { + return nil + } + if t.items < items { + return errors.New("truncation above head") + } + t.data = t.data[items-t.offset:] + t.offset = items + return nil +} + +// commit merges the given item batch into table. It's presumed that the +// batch is ordered and continuous with table. +func (t *memoryTable) commit(batch [][]byte) error { + t.lock.Lock() + defer t.lock.Unlock() + + for _, item := range batch { + t.size += uint64(len(item)) + } + t.data = append(t.data, batch...) + t.items += uint64(len(batch)) + return nil +} + +// memoryBatch is the singleton batch used for ancient write. 
+type memoryBatch struct { + data map[string][][]byte + next map[string]uint64 + size map[string]int64 +} + +func newMemoryBatch() *memoryBatch { + return &memoryBatch{ + data: make(map[string][][]byte), + next: make(map[string]uint64), + size: make(map[string]int64), + } +} + +func (b *memoryBatch) reset(freezer *MemoryFreezer) { + b.data = make(map[string][][]byte) + b.next = make(map[string]uint64) + b.size = make(map[string]int64) + + for name, table := range freezer.tables { + b.next[name] = table.items + } +} + +// Append adds an RLP-encoded item. +func (b *memoryBatch) Append(kind string, number uint64, item interface{}) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + blob, err := rlp.EncodeToBytes(item) + if err != nil { + return err + } + b.data[kind] = append(b.data[kind], blob) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// AppendRaw adds an item without RLP-encoding it. +func (b *memoryBatch) AppendRaw(kind string, number uint64, blob []byte) error { + if b.next[kind] != number { + return errOutOrderInsertion + } + b.data[kind] = append(b.data[kind], common.CopyBytes(blob)) + b.next[kind]++ + b.size[kind] += int64(len(blob)) + return nil +} + +// commit is called at the end of a write operation and writes all remaining +// data to tables. +func (b *memoryBatch) commit(freezer *MemoryFreezer) (items uint64, writeSize int64, err error) { + // Check that count agrees on all batches. + items = math.MaxUint64 + for name, next := range b.next { + if items < math.MaxUint64 && next != items { + return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, next, items) + } + items = next + } + // Commit all table batches. + for name, batch := range b.data { + table := freezer.tables[name] + if err := table.commit(batch); err != nil { + return 0, 0, err + } + writeSize += b.size[name] + } + return items, writeSize, nil +} + +// MemoryFreezer is an ephemeral ancient store. 
It implements the ethdb.AncientStore +// interface and can be used along with ephemeral key-value store. +type MemoryFreezer struct { + items uint64 // Number of items stored + tail uint64 // Number of the first stored item in the freezer + readonly bool // Flag if the freezer is only for reading + lock sync.RWMutex // Lock to protect fields + tables map[string]*memoryTable // Tables for storing everything + writeBatch *memoryBatch // Pre-allocated write batch +} + +// NewMemoryFreezer initializes an in-memory freezer instance. +func NewMemoryFreezer(readonly bool, tableName map[string]bool) *MemoryFreezer { + tables := make(map[string]*memoryTable) + for name := range tableName { + tables[name] = newMemoryTable(name) + } + return &MemoryFreezer{ + writeBatch: newMemoryBatch(), + readonly: readonly, + tables: tables, + } +} + +// HasAncient returns an indicator whether the specified data exists. +func (f *MemoryFreezer) HasAncient(kind string, number uint64) (bool, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.has(number), nil + } + return false, nil +} + +// Ancient retrieves an ancient binary blob from the in-memory freezer. +func (f *MemoryFreezer) Ancient(kind string, number uint64) ([]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + data, err := t.retrieve(number, 1, 0) + if err != nil { + return nil, err + } + return data[0], nil +} + +// AncientRange retrieves multiple items in sequence, starting from the index 'start'. +// It will return +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. 
+// - if maxBytes is not specified, 'count' items will be returned if they are present +func (f *MemoryFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + t := f.tables[kind] + if t == nil { + return nil, errUnknownTable + } + return t.retrieve(start, count, maxBytes) +} + +// Ancients returns the ancient item numbers in the freezer. +func (f *MemoryFreezer) Ancients() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.items, nil +} + +// Tail returns the number of first stored item in the freezer. +// This number can also be interpreted as the total deleted item numbers. +func (f *MemoryFreezer) Tail() (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.tail, nil +} + +// AncientSize returns the ancient size of the specified category. +func (f *MemoryFreezer) AncientSize(kind string) (uint64, error) { + f.lock.RLock() + defer f.lock.RUnlock() + + if table := f.tables[kind]; table != nil { + return table.size, nil + } + return 0, errUnknownTable +} + +// ReadAncients runs the given read operation while ensuring that no writes take place +// on the underlying freezer. +func (f *MemoryFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { + f.lock.RLock() + defer f.lock.RUnlock() + + return fn(f) +} + +// ModifyAncients runs the given write operation. +func (f *MemoryFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + // Roll back all tables to the starting position in case of error. + defer func(old uint64) { + if err == nil { + return + } + // The write operation has failed. Go back to the previous item position. 
+ for name, table := range f.tables { + err := table.truncateHead(old) + if err != nil { + log.Error("Freezer table roll-back failed", "table", name, "index", old, "err", err) + } + } + }(f.items) + + // Modify the ancients in batch. + f.writeBatch.reset(f) + if err := fn(f.writeBatch); err != nil { + return 0, err + } + item, writeSize, err := f.writeBatch.commit(f) + if err != nil { + return 0, err + } + f.items = item + return writeSize, nil +} + +// TruncateHead discards any recent data above the provided threshold number. +// It returns the previous head number. +func (f *MemoryFreezer) TruncateHead(items uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.items + if old <= items { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateHead(items); err != nil { + return 0, err + } + } + f.items = items + return old, nil +} + +// TruncateTail discards any recent data below the provided threshold number. +func (f *MemoryFreezer) TruncateTail(tail uint64) (uint64, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.readonly { + return 0, errReadOnly + } + old := f.tail + if old >= tail { + return old, nil + } + for _, table := range f.tables { + if err := table.truncateTail(tail); err != nil { + return 0, err + } + } + f.tail = tail + return old, nil +} + +// Sync flushes all data tables to disk. +func (f *MemoryFreezer) Sync() error { + return nil +} + +// MigrateTable processes and migrates entries of a given table to a new format. +// The second argument is a function that takes a raw entry and returns it +// in the newest format. +func (f *MemoryFreezer) MigrateTable(string, func([]byte) ([]byte, error)) error { + return errors.New("not implemented") +} + +// Close releases all the sources held by the memory freezer. It will panic if +// any following invocation is made to a closed freezer. 
+func (f *MemoryFreezer) Close() error { + f.lock.Lock() + defer f.lock.Unlock() + + f.tables = nil + f.writeBatch = nil + return nil +} + +// Reset drops all the data cached in the memory freezer and reset itself +// back to default state. +func (f *MemoryFreezer) Reset() error { + f.lock.Lock() + defer f.lock.Unlock() + + tables := make(map[string]*memoryTable) + for name := range f.tables { + tables[name] = newMemoryTable(name) + } + f.tables = tables + f.items, f.tail = 0, 0 + return nil +} diff --git a/core/rawdb/freezer_memory_test.go b/core/rawdb/freezer_memory_test.go new file mode 100644 index 000000000000..e71de0f62922 --- /dev/null +++ b/core/rawdb/freezer_memory_test.go @@ -0,0 +1,41 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package rawdb + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" + "github.com/ethereum/go-ethereum/ethdb" +) + +func TestMemoryFreezer(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + return NewMemoryFreezer(false, tables) + }) +} diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index 7a8548973819..6f8541f43bfc 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -30,16 +30,17 @@ const tmpSuffix = ".tmp" // freezerOpenFunc is the function used to open/create a freezer. type freezerOpenFunc = func() (*Freezer, error) -// ResettableFreezer is a wrapper of the freezer which makes the +// resettableFreezer is a wrapper of the freezer which makes the // freezer resettable. -type ResettableFreezer struct { - freezer *Freezer - opener freezerOpenFunc - datadir string - lock sync.RWMutex +type resettableFreezer struct { + readOnly bool + freezer *Freezer + opener freezerOpenFunc + datadir string + lock sync.RWMutex } -// NewResettableFreezer creates a resettable freezer, note freezer is +// newResettableFreezer creates a resettable freezer, note freezer is // only resettable if the passed file directory is exclusively occupied // by the freezer. And also the user-configurable ancient root directory // is **not** supported for reset since it might be a mount and rename @@ -48,7 +49,7 @@ type ResettableFreezer struct { // // The reset function will delete directory atomically and re-create the // freezer from scratch. 
-func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*ResettableFreezer, error) { +func newResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*resettableFreezer, error) { if err := cleanup(datadir); err != nil { return nil, err } @@ -59,10 +60,11 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa if err != nil { return nil, err } - return &ResettableFreezer{ - freezer: freezer, - opener: opener, - datadir: datadir, + return &resettableFreezer{ + readOnly: readonly, + freezer: freezer, + opener: opener, + datadir: datadir, }, nil } @@ -70,10 +72,13 @@ func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTa // recreate the freezer from scratch. The atomicity of directory deletion // is guaranteed by the rename operation, the leftover directory will be // cleaned up in next startup in case crash happens after rename. -func (f *ResettableFreezer) Reset() error { +func (f *resettableFreezer) Reset() error { f.lock.Lock() defer f.lock.Unlock() + if f.readOnly { + return errReadOnly + } if err := f.freezer.Close(); err != nil { return err } @@ -93,7 +98,7 @@ func (f *ResettableFreezer) Reset() error { } // Close terminates the chain freezer, unmapping all the data files. 
-func (f *ResettableFreezer) Close() error { +func (f *resettableFreezer) Close() error { f.lock.RLock() defer f.lock.RUnlock() @@ -102,7 +107,7 @@ func (f *ResettableFreezer) Close() error { // HasAncient returns an indicator whether the specified ancient data exists // in the freezer -func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) { +func (f *resettableFreezer) HasAncient(kind string, number uint64) (bool, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -110,7 +115,7 @@ func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) } // Ancient retrieves an ancient binary blob from the append-only immutable files. -func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { +func (f *resettableFreezer) Ancient(kind string, number uint64) ([]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -123,7 +128,7 @@ func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) // - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), // but will otherwise return as many items as fit into maxByteSize. // - if maxBytes is not specified, 'count' items will be returned if they are present. -func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { +func (f *resettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -131,7 +136,7 @@ func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uin } // Ancients returns the length of the frozen items. -func (f *ResettableFreezer) Ancients() (uint64, error) { +func (f *resettableFreezer) Ancients() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -139,7 +144,7 @@ func (f *ResettableFreezer) Ancients() (uint64, error) { } // Tail returns the number of first stored item in the freezer. 
-func (f *ResettableFreezer) Tail() (uint64, error) { +func (f *resettableFreezer) Tail() (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -147,7 +152,7 @@ func (f *ResettableFreezer) Tail() (uint64, error) { } // AncientSize returns the ancient size of the specified category. -func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { +func (f *resettableFreezer) AncientSize(kind string) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -156,7 +161,7 @@ func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) { // ReadAncients runs the given read operation while ensuring that no writes take place // on the underlying freezer. -func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { +func (f *resettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -164,7 +169,7 @@ func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) ( } // ModifyAncients runs the given write operation. -func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { +func (f *resettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { f.lock.RLock() defer f.lock.RUnlock() @@ -173,7 +178,7 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) // TruncateHead discards any recent data above the provided threshold number. // It returns the previous head number. -func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { +func (f *resettableFreezer) TruncateHead(items uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -182,7 +187,7 @@ func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { // TruncateTail discards any recent data below the provided threshold number. 
// It returns the previous value -func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { +func (f *resettableFreezer) TruncateTail(tail uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -190,7 +195,7 @@ func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { } // Sync flushes all data tables to disk. -func (f *ResettableFreezer) Sync() error { +func (f *resettableFreezer) Sync() error { f.lock.RLock() defer f.lock.RUnlock() @@ -199,7 +204,7 @@ func (f *ResettableFreezer) Sync() error { // MigrateTable processes the entries in a given table in sequence // converting them to a new format if they're of an old format. -func (f *ResettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { +func (f *resettableFreezer) MigrateTable(kind string, convert convertLegacyFn) error { f.lock.RLock() defer f.lock.RUnlock() diff --git a/core/rawdb/freezer_resettable_test.go b/core/rawdb/freezer_resettable_test.go index d741bc14e54f..61dc23d79841 100644 --- a/core/rawdb/freezer_resettable_test.go +++ b/core/rawdb/freezer_resettable_test.go @@ -33,7 +33,7 @@ func TestResetFreezer(t *testing.T) { {1, bytes.Repeat([]byte{1}, 2048)}, {2, bytes.Repeat([]byte{2}, 2048)}, } - f, _ := NewResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(t.TempDir(), "", false, 2048, freezerTestTableDef) defer f.Close() f.ModifyAncients(func(op ethdb.AncientWriteOp) error { @@ -87,7 +87,7 @@ func TestFreezerCleanup(t *testing.T) { {2, bytes.Repeat([]byte{2}, 2048)}, } datadir := t.TempDir() - f, _ := NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ := newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.ModifyAncients(func(op ethdb.AncientWriteOp) error { for _, item := range items { op.AppendRaw("test", item.id, item.blob) @@ -98,7 +98,7 @@ func TestFreezerCleanup(t *testing.T) { os.Rename(datadir, tmpName(datadir)) // Open the freezer again, trigger 
cleanup operation - f, _ = NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) + f, _ = newResettableFreezer(datadir, "", false, 2048, freezerTestTableDef) f.Close() if _, err := os.Lstat(tmpName(datadir)); !os.IsNotExist(err) { diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 93bc2c225442..72d1417200ce 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -27,6 +27,7 @@ import ( "sync" "testing" + "github.com/ethereum/go-ethereum/core/rawdb/ancienttest" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" @@ -480,3 +481,22 @@ func TestFreezerCloseSync(t *testing.T) { t.Fatalf("want %v, have %v", have, want) } } + +func TestFreezerSuite(t *testing.T) { + ancienttest.TestAncientSuite(t, func(kinds []string) ethdb.AncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, _ := newFreezerForTesting(t, tables) + return f + }) + ancienttest.TestResettableAncientSuite(t, func(kinds []string) ethdb.ResettableAncientStore { + tables := make(map[string]bool) + for _, kind := range kinds { + tables[kind] = true + } + f, _ := newResettableFreezer(t.TempDir(), "", false, 2048, tables) + return f + }) +} diff --git a/core/state/access_events.go b/core/state/access_events.go new file mode 100644 index 000000000000..4b6c7c7e69bb --- /dev/null +++ b/core/state/access_events.go @@ -0,0 +1,320 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package state + +import ( + "maps" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" +) + +// mode specifies how a tree location has been accessed +// for the byte value: +// * the first bit is set if the branch has been edited +// * the second bit is set if the branch has been read +type mode byte + +const ( + AccessWitnessReadFlag = mode(1) + AccessWitnessWriteFlag = mode(2) +) + +var zeroTreeIndex uint256.Int + +// AccessEvents lists the locations of the state that are being accessed +// during the production of a block. +type AccessEvents struct { + branches map[branchAccessKey]mode + chunks map[chunkAccessKey]mode + + pointCache *utils.PointCache +} + +func NewAccessEvents(pointCache *utils.PointCache) *AccessEvents { + return &AccessEvents{ + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), + pointCache: pointCache, + } +} + +// Merge is used to merge the access events that were generated during the +// execution of a tx, with the accumulation of all access events that were +// generated during the execution of all txs preceding this one in a block. +func (ae *AccessEvents) Merge(other *AccessEvents) { + for k := range other.branches { + ae.branches[k] |= other.branches[k] + } + for k, chunk := range other.chunks { + ae.chunks[k] |= chunk + } +} + +// Keys returns, predictably, the list of keys that were touched during the +// buildup of the access witness. 
+func (ae *AccessEvents) Keys() [][]byte { + // TODO: consider if parallelizing this is worth it, probably depending on len(ae.chunks). + keys := make([][]byte, 0, len(ae.chunks)) + for chunk := range ae.chunks { + basePoint := ae.pointCache.Get(chunk.addr[:]) + key := utils.GetTreeKeyWithEvaluatedAddress(basePoint, &chunk.treeIndex, chunk.leafKey) + keys = append(keys, key) + } + return keys +} + +func (ae *AccessEvents) Copy() *AccessEvents { + cpy := &AccessEvents{ + branches: maps.Clone(ae.branches), + chunks: maps.Clone(ae.chunks), + pointCache: ae.pointCache, + } + return cpy +} + +// AddAccount returns the gas to be charged for each of the currently cold +// member fields of an account. +func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool) uint64 { + var gas uint64 + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite) + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite) + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite) + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite) + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite) + return gas +} + +// MessageCallGas returns the gas to be charged for each of the currently +// cold member fields of an account, that need to be touched when making a message +// call to that account. +func (ae *AccessEvents) MessageCallGas(destination common.Address) uint64 { + var gas uint64 + gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.VersionLeafKey, false) + gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.CodeSizeLeafKey, false) + return gas +} + +// ValueTransferGas returns the gas to be charged for each of the currently +// cold balance member fields of the caller and the callee accounts. 
+func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address) uint64 { + var gas uint64 + gas += ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey, true) + gas += ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, true) + return gas +} + +// ContractCreateInitGas returns the access gas costs for the initialization of +// a contract creation. +func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, createSendsValue bool) uint64 { + var gas uint64 + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, true) + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, true) + if createSendsValue { + gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, true) + } + return gas +} + +// AddTxOrigin adds the member fields of the sender account to the access event list, +// so that cold accesses are not charged, since they are covered by the 21000 gas. +func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) { + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey, false) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey, true) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey, true) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey, false) + ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey, false) +} + +// AddTxDestination adds the member fields of the sender account to the access event list, +// so that cold accesses are not charged, since they are covered by the 21000 gas. 
+func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue bool) { + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, false) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, sendsValue) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, false) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, false) + ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, false) +} + +// SlotGas returns the amount of gas to be charged for a cold storage access. +func (ae *AccessEvents) SlotGas(addr common.Address, slot common.Hash, isWrite bool) uint64 { + treeIndex, subIndex := utils.StorageIndex(slot.Bytes()) + return ae.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite) +} + +// touchAddressAndChargeGas adds any missing access event to the access event list, and returns the cold +// access cost to be charged, if need be. +func (ae *AccessEvents) touchAddressAndChargeGas(addr common.Address, treeIndex uint256.Int, subIndex byte, isWrite bool) uint64 { + stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := ae.touchAddress(addr, treeIndex, subIndex, isWrite) + + var gas uint64 + if stemRead { + gas += params.WitnessBranchReadCost + } + if selectorRead { + gas += params.WitnessChunkReadCost + } + if stemWrite { + gas += params.WitnessBranchWriteCost + } + if selectorWrite { + gas += params.WitnessChunkWriteCost + } + if selectorFill { + gas += params.WitnessChunkFillCost + } + return gas +} + +// touchAddress adds any missing access event to the access event list. +func (ae *AccessEvents) touchAddress(addr common.Address, treeIndex uint256.Int, subIndex byte, isWrite bool) (bool, bool, bool, bool, bool) { + branchKey := newBranchAccessKey(addr, treeIndex) + chunkKey := newChunkAccessKey(branchKey, subIndex) + + // Read access. 
+ var branchRead, chunkRead bool + if _, hasStem := ae.branches[branchKey]; !hasStem { + branchRead = true + ae.branches[branchKey] = AccessWitnessReadFlag + } + if _, hasSelector := ae.chunks[chunkKey]; !hasSelector { + chunkRead = true + ae.chunks[chunkKey] = AccessWitnessReadFlag + } + + // Write access. + var branchWrite, chunkWrite, chunkFill bool + if isWrite { + if (ae.branches[branchKey] & AccessWitnessWriteFlag) == 0 { + branchWrite = true + ae.branches[branchKey] |= AccessWitnessWriteFlag + } + + chunkValue := ae.chunks[chunkKey] + if (chunkValue & AccessWitnessWriteFlag) == 0 { + chunkWrite = true + ae.chunks[chunkKey] |= AccessWitnessWriteFlag + } + // TODO: charge chunk filling costs if the leaf was previously empty in the state + } + return branchRead, chunkRead, branchWrite, chunkWrite, chunkFill +} + +type branchAccessKey struct { + addr common.Address + treeIndex uint256.Int +} + +func newBranchAccessKey(addr common.Address, treeIndex uint256.Int) branchAccessKey { + var sk branchAccessKey + sk.addr = addr + sk.treeIndex = treeIndex + return sk +} + +type chunkAccessKey struct { + branchAccessKey + leafKey byte +} + +func newChunkAccessKey(branchKey branchAccessKey, leafKey byte) chunkAccessKey { + var lk chunkAccessKey + lk.branchAccessKey = branchKey + lk.leafKey = leafKey + return lk +} + +// CodeChunksRangeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC, size uint64, codeLen uint64, isWrite bool) uint64 { + // note that in the case where the copied code is outside the range of the + // contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. 
+ if (codeLen == 0 && size == 0) || startPC > codeLen { + return 0 + } + + endPC := startPC + size + if endPC > codeLen { + endPC = codeLen + } + if endPC > 0 { + endPC -= 1 // endPC is the last bytecode that will be touched. + } + + var statelessGasCharged uint64 + for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { + treeIndex := *uint256.NewInt((chunkNumber + 128) / 256) + subIndex := byte((chunkNumber + 128) % 256) + gas := ae.touchAddressAndChargeGas(contractAddr, treeIndex, subIndex, isWrite) + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas) + if overflow { + panic("overflow when adding gas") + } + } + return statelessGasCharged +} + +// VersionGas adds the account's version to the accessed data, and returns the +// amount of gas that it costs. +// Note that an access in write mode implies an access in read mode, whereas an +// access in read mode does not imply an access in write mode. +func (ae *AccessEvents) VersionGas(addr common.Address, isWrite bool) uint64 { + return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite) +} + +// BalanceGas adds the account's balance to the accessed data, and returns the +// amount of gas that it costs. +// in write mode. If false, the charged gas corresponds to an access in read mode. +// Note that an access in write mode implies an access in read mode, whereas an access in +// read mode does not imply an access in write mode. +func (ae *AccessEvents) BalanceGas(addr common.Address, isWrite bool) uint64 { + return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite) +} + +// NonceGas adds the account's nonce to the accessed data, and returns the +// amount of gas that it costs. +// in write mode. If false, the charged gas corresponds to an access in read mode. +// Note that an access in write mode implies an access in read mode, whereas an access in +// read mode does not imply an access in write mode. 
+func (ae *AccessEvents) NonceGas(addr common.Address, isWrite bool) uint64 { + return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite) +} + +// CodeSizeGas adds the account's code size to the accessed data, and returns the +// amount of gas that it costs. +// in write mode. If false, the charged gas corresponds to an access in read mode. +// Note that an access in write mode implies an access in read mode, whereas an access in +// read mode does not imply an access in write mode. +func (ae *AccessEvents) CodeSizeGas(addr common.Address, isWrite bool) uint64 { + return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite) +} + +// CodeHashGas adds the account's code hash to the accessed data, and returns the +// amount of gas that it costs. +// in write mode. If false, the charged gas corresponds to an access in read mode. +// Note that an access in write mode implies an access in read mode, whereas an access in +// read mode does not imply an access in write mode. +func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool) uint64 { + return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite) +} diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go new file mode 100644 index 000000000000..705033fe0be3 --- /dev/null +++ b/core/state/access_events_test.go @@ -0,0 +1,153 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie/utils"
+)
+
+var (
+	testAddr  [20]byte
+	testAddr2 [20]byte
+)
+
+func init() {
+	for i := byte(0); i < 20; i++ {
+		testAddr[i] = i
+		testAddr2[i] = 2 * i
+	}
+}
+
+func TestAccountHeaderGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	// Check cold read cost
+	gas := ae.VersionGas(testAddr, false)
+	if gas != params.WitnessBranchReadCost+params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessChunkReadCost)
+	}
+
+	// Check warm read cost
+	gas = ae.VersionGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check cold read costs in the same group no longer incur the branch read cost
+	gas = ae.BalanceGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.NonceGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.CodeSizeGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.CodeHashGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+
+	// Check cold write cost
+	gas = ae.VersionGas(testAddr, true)
+	if gas != params.WitnessBranchWriteCost+params.WitnessChunkWriteCost {
+		t.Fatalf("incorrect gas computed, got %d, want 
%d", gas, params.WitnessBranchWriteCost+params.WitnessChunkWriteCost)
+	}
+
+	// Check warm write cost
+	gas = ae.VersionGas(testAddr, true)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check a write without a read charges both read and write costs
+	gas = ae.BalanceGas(testAddr2, true)
+	if gas != params.WitnessBranchReadCost+params.WitnessBranchWriteCost+params.WitnessChunkWriteCost+params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessBranchWriteCost+params.WitnessChunkWriteCost+params.WitnessChunkReadCost)
+	}
+
+	// Check that a write followed by a read charges nothing
+	gas = ae.BalanceGas(testAddr2, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check that reading a slot from the account header only charges the
+	// chunk read cost.
+	gas = ae.SlotGas(testAddr, common.Hash{}, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+}
+
+// TestContractCreateInitGas checks that the gas cost of contract creation is correctly
+// calculated.
+func TestContractCreateInitGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	var testAddr [20]byte
+	for i := byte(0); i < 20; i++ {
+		testAddr[i] = i
+	}
+
+	// Check cold read cost, without a value
+	gas := ae.ContractCreateInitGas(testAddr, false)
+	if gas != params.WitnessBranchWriteCost+params.WitnessBranchReadCost+params.WitnessChunkWriteCost*2+params.WitnessChunkReadCost*2 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchWriteCost+params.WitnessBranchReadCost+params.WitnessChunkWriteCost*2+params.WitnessChunkReadCost*2)
+	}
+
+	// Check warm read cost
+	gas = ae.ContractCreateInitGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+}
+
+// TestMessageCallGas checks that the gas cost of message calls is correctly
+// calculated.
+func TestMessageCallGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	// Check cold read cost, without a value
+	gas := ae.MessageCallGas(testAddr)
+	if gas != params.WitnessBranchReadCost+params.WitnessChunkReadCost*2 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessChunkReadCost*2)
+	}
+
+	// Check that reading the version and code size of the same account does not incur the branch read cost
+	gas = ae.VersionGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+	gas = ae.CodeSizeGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check warm read cost
+	gas = ae.MessageCallGas(testAddr)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+}
diff --git a/core/state/database.go b/core/state/database.go
index 188ecf0c86a5..04d7c06687c0 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -20,7 +20,6 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/crate-crypto/go-ipa/banderwagon"
 	"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" @@ -40,11 +39,8 @@ const ( // Cache size granted for caching clean code. codeCacheSize = 64 * 1024 * 1024 - // commitmentSize is the size of commitment stored in cache. - commitmentSize = banderwagon.UncompressedSize - - // Cache item granted for caching commitment results. - commitmentCacheItems = 64 * 1024 * 1024 / (commitmentSize + common.AddressLength) + // Number of address->curve point associations to keep. + pointCacheSize = 4096 ) // Database wraps access to tries and contract code. @@ -67,6 +63,9 @@ type Database interface { // DiskDB returns the underlying key-value disk database. DiskDB() ethdb.KeyValueStore + // PointCache returns the cache holding points used in verkle tree key computation + PointCache() *utils.PointCache + // TrieDB returns the underlying trie database for managing trie nodes. TrieDB() *triedb.Database } @@ -139,6 +138,9 @@ type Trie interface { // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. Prove(key []byte, proofDb ethdb.KeyValueWriter) error + + // IsVerkle returns true if the trie is verkle-tree based + IsVerkle() bool } // NewDatabase creates a backing store for state. 
The returned database is safe for @@ -157,6 +159,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database { codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: triedb.NewDatabase(db, config), + pointCache: utils.NewPointCache(pointCacheSize), } } @@ -167,6 +170,7 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: triedb, + pointCache: utils.NewPointCache(pointCacheSize), } } @@ -175,12 +179,13 @@ type cachingDB struct { codeSizeCache *lru.Cache[common.Hash, int] codeCache *lru.SizeConstrainedCache[common.Hash, []byte] triedb *triedb.Database + pointCache *utils.PointCache } // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { if db.triedb.IsVerkle() { - return trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems)) + return trie.NewVerkleTrie(root, db.triedb, db.pointCache) } tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { @@ -266,3 +271,8 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore { func (db *cachingDB) TrieDB() *triedb.Database { return db.triedb } + +// PointCache returns the cache of evaluated curve points. 
+func (db *cachingDB) PointCache() *utils.PointCache { + return db.pointCache +} diff --git a/core/state/journal.go b/core/state/journal.go index c0f5615c9815..ad4a654fc6a2 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -131,7 +131,8 @@ type ( storageChange struct { account *common.Address key common.Hash - prevvalue *common.Hash + prevvalue common.Hash + origvalue common.Hash } codeChange struct { account *common.Address @@ -278,7 +279,7 @@ func (ch codeChange) copy() journalEntry { } func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue) + s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue, ch.origvalue) } func (ch storageChange) dirtied() *common.Address { diff --git a/core/state/state_object.go b/core/state/state_object.go index 1454f7a459a5..b7a215bd1753 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -19,7 +19,6 @@ package state import ( "bytes" "fmt" - "io" "maps" "time" @@ -56,9 +55,20 @@ type stateObject struct { trie Trie // storage trie, which becomes non-nil on first access code []byte // contract bytecode, which gets set when code is loaded - originStorage Storage // Storage cache of original entries to dedup rewrites - pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction + originStorage Storage // Storage entries that have been accessed within the current block + dirtyStorage Storage // Storage entries that have been modified within the current transaction + pendingStorage Storage // Storage entries that have been modified within the current block + + // uncommittedStorage tracks a set of storage entries that have been modified + // but not yet committed since the "last commit operation", along with their + // original values before mutation. 
+ // + // Specifically, the commit will be performed after each transaction before + // the byzantium fork, therefore the map is already reset at the transaction + // boundary; however post the byzantium fork, the commit will only be performed + // at the end of block, this set essentially tracks all the modifications + // made within the block. + uncommittedStorage Storage // Cache flags. dirtyCode bool // true if the code was updated @@ -87,22 +97,18 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s acct = types.NewEmptyStateAccount() } return &stateObject{ - db: db, - address: address, - addrHash: crypto.Keccak256Hash(address[:]), - origin: origin, - data: *acct, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + db: db, + address: address, + addrHash: crypto.Keccak256Hash(address[:]), + origin: origin, + data: *acct, + originStorage: make(Storage), + dirtyStorage: make(Storage), + pendingStorage: make(Storage), + uncommittedStorage: make(Storage), } } -// EncodeRLP implements rlp.Encoder. -func (s *stateObject) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &s.data) -} - func (s *stateObject) markSelfdestructed() { s.selfDestructed = true } @@ -118,46 +124,58 @@ func (s *stateObject) touch() { } } -// getTrie returns the associated storage trie. The trie will be opened -// if it's not loaded previously. An error will be returned if trie can't -// be loaded. +// getTrie returns the associated storage trie. The trie will be opened if it's +// not loaded previously. An error will be returned if trie can't be loaded. +// +// If a new trie is opened, it will be cached within the state object to allow +// subsequent reads to expand the same trie instead of reloading from disk. 
func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { - // Try fetching from prefetcher first - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher - s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) - } - if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) - if err != nil { - return nil, err - } - s.trie = tr + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) + if err != nil { + return nil, err } + s.trie = tr } return s.trie, nil } -// GetState retrieves a value from the account storage trie. +// getPrefetchedTrie returns the associated trie, as populated by the prefetcher +// if it's available. +// +// Note, opposed to getTrie, this method will *NOT* blindly cache the resulting +// trie in the state object. The caller might want to do that, but it's cleaner +// to break the hidden interdependency between retrieving tries from the db or +// from the prefetcher. +func (s *stateObject) getPrefetchedTrie() Trie { + // If there's nothing to meaningfully return, let the user figure it out by + // pulling the trie from disk. + if s.data.Root == types.EmptyRootHash || s.db.prefetcher == nil { + return nil + } + // Attempt to retrieve the trie from the prefetcher + return s.db.prefetcher.trie(s.addrHash, s.data.Root) +} + +// GetState retrieves a value associated with the given storage key. func (s *stateObject) GetState(key common.Hash) common.Hash { value, _ := s.getState(key) return value } -// getState retrieves a value from the account storage trie and also returns if -// the slot is already dirty or not. -func (s *stateObject) getState(key common.Hash) (common.Hash, bool) { - // If we have a dirty value for this state entry, return it +// getState retrieves a value associated with the given storage key, along with +// its original value. 
+func (s *stateObject) getState(key common.Hash) (common.Hash, common.Hash) { + origin := s.GetCommittedState(key) value, dirty := s.dirtyStorage[key] if dirty { - return value, true + return value, origin } - // Otherwise return the entry's original value - return s.GetCommittedState(key), false + return origin, origin } -// GetCommittedState retrieves a value from the committed account storage trie. +// GetCommittedState retrieves the value associated with the specific key +// without any mutations caused in the current execution. func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If we have a pending write or clean cached, return that if value, pending := s.pendingStorage[key]; pending { @@ -173,6 +191,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // have been handles via pendingStorage above. // 2) we don't have new values, and can deliver empty response back if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { + s.originStorage[key] = common.Hash{} // track the empty slot as origin value return common.Hash{} } // If no live objects are available, attempt to use snapshots @@ -219,57 +238,64 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { func (s *stateObject) SetState(key, value common.Hash) { // If the new value is the same as old, don't set. Otherwise, track only the // dirty changes, supporting reverting all of it back to no change. 
- prev, dirty := s.getState(key) + prev, origin := s.getState(key) if prev == value { return } - var prevvalue *common.Hash - if dirty { - prevvalue = &prev - } // New value is different, update and journal the change s.db.journal.append(storageChange{ account: &s.address, key: key, - prevvalue: prevvalue, + prevvalue: prev, + origvalue: origin, }) if s.db.logger != nil && s.db.logger.OnStorageChange != nil { s.db.logger.OnStorageChange(s.address, key, prev, value) } - s.setState(key, &value) + s.setState(key, value, origin) } -// setState updates a value in account dirty storage. If the value being set is -// nil (assuming journal revert), the dirtyness is removed. -func (s *stateObject) setState(key common.Hash, value *common.Hash) { - // If the first set is being reverted, undo the dirty marker - if value == nil { +// setState updates a value in account dirty storage. The dirtiness will be +// removed if the value being set equals to the original value. +func (s *stateObject) setState(key common.Hash, value common.Hash, origin common.Hash) { + // Storage slot is set back to its original value, undo the dirty marker + if value == origin { delete(s.dirtyStorage, key) return } - // Otherwise restore the previous value - s.dirtyStorage[key] = *value + s.dirtyStorage[key] = value } // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. -func (s *stateObject) finalise(prefetch bool) { +func (s *stateObject) finalise() { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { - // If the slot is different from its original value, move it into the - // pending area to be committed at the end of the block (and prefetch - // the pathways). 
- if value != s.originStorage[key] { - s.pendingStorage[key] = value - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + if origin, exist := s.uncommittedStorage[key]; exist && origin == value { + // The slot is reverted to its original value, delete the entry + // to avoid thrashing the data structures. + delete(s.uncommittedStorage, key) + } else if exist { + // The slot is modified to another value and the slot has been + // tracked for commit, do nothing here. } else { - // Otherwise, the slot was reverted to its original value, remove it - // from the pending area to avoid thrashing the data strutures. - delete(s.pendingStorage, key) + // The slot is different from its original value and hasn't been + // tracked for commit yet. + s.uncommittedStorage[key] = s.GetCommittedState(key) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + } + // Aggregate the dirty storage slots into the pending area. It might + // be possible that the value of tracked slot here is same with the + // one in originStorage (e.g. the slot was modified in tx_a and then + // modified back in tx_b). We can't blindly remove it from pending + // map as the dirty slot might have been committed already (before the + // byzantium fork) and entry is necessary to modify the value back. 
+ s.pendingStorage[key] = value + } + if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { + if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch); err != nil { + log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) } - } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) @@ -286,29 +312,29 @@ func (s *stateObject) finalise(prefetch bool) { // loading or updating of the trie, an error will be returned. Furthermore, // this function will return the mutated storage trie, or nil if there is no // storage change at all. +// +// It assumes all the dirty storage slots have been finalized before. func (s *stateObject) updateTrie() (Trie, error) { - // Make sure all dirty slots are finalized into the pending storage area - s.finalise(false) - // Short circuit if nothing changed, don't bother with hashing anything - if len(s.pendingStorage) == 0 { + if len(s.uncommittedStorage) == 0 { return s.trie, nil } - // The snapshot storage map for the object - var ( - storage map[common.Hash][]byte - origin map[common.Hash][]byte - ) - tr, err := s.getTrie() - if err != nil { - s.db.setError(err) - return nil, err + // Retrieve a pretecher populated trie, or fall back to the database + tr := s.getPrefetchedTrie() + if tr != nil { + // Prefetcher returned a live trie, swap it out for the current one + s.trie = tr + } else { + // Fetcher not running or empty trie, fallback to the database trie + var err error + tr, err = s.getTrie() + if err != nil { + s.db.setError(err) + return nil, err + } } - // Insert all the pending storage updates into the trie - usedStorage := make([][]byte, 0, len(s.pendingStorage)) - - // Perform trie updates before deletions. 
This prevents resolution of unnecessary trie nodes
-	// in circumstances similar to the following:
+	// Perform trie updates before deletions. This prevents resolution of unnecessary trie nodes
+	// in circumstances similar to the following:
 	//
 	// Consider nodes `A` and `B` who share the same full node parent `P` and have no other siblings.
 	// During the execution of a block:
@@ -317,74 +343,44 @@
 	// If the deletion is handled first, then `P` would be left with only one child, thus collapsed
 	// into a shortnode. This requires `B` to be resolved from disk.
 	// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
-	var deletions []common.Hash
-	for key, value := range s.pendingStorage {
+	var (
+		deletions []common.Hash
+		used      = make([][]byte, 0, len(s.uncommittedStorage))
+	)
+	for key, origin := range s.uncommittedStorage {
 		// Skip noop changes, persist actual changes
-		if value == s.originStorage[key] {
+		value, exist := s.pendingStorage[key]
+		if value == origin {
+			log.Error("Storage update was noop", "address", s.address, "slot", key)
+			continue
+		}
+		if !exist {
+			log.Error("Storage slot is not found in pending area", "address", s.address, "slot", key)
+			continue
+		}
-		prev := s.originStorage[key]
-		s.originStorage[key] = value
-
-		var encoded []byte // rlp-encoded value to be used by the snapshot
 		if (value != common.Hash{}) {
-			// Encoding []byte cannot fail, ok to ignore the error.
- trimmed := common.TrimLeftZeroes(value[:]) - encoded, _ = rlp.EncodeToBytes(trimmed) - if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil { + if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil { s.db.setError(err) return nil, err } - s.db.StorageUpdated += 1 + s.db.StorageUpdated.Add(1) } else { deletions = append(deletions, key) } - // Cache the mutated storage slots until commit - if storage == nil { - if storage = s.db.storages[s.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - s.db.storages[s.addrHash] = storage - } - } - khash := crypto.HashData(s.db.hasher, key[:]) - storage[khash] = encoded // encoded will be nil if it's deleted - - // Cache the original value of mutated storage slots - if origin == nil { - if origin = s.db.storagesOrigin[s.address]; origin == nil { - origin = make(map[common.Hash][]byte) - s.db.storagesOrigin[s.address] = origin - } - } - // Track the original value of slot only if it's mutated first time - if _, ok := origin[khash]; !ok { - if prev == (common.Hash{}) { - origin[khash] = nil // nil if it was not present previously - } else { - // Encoding []byte cannot fail, ok to ignore the error. 
- b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:])) - origin[khash] = b - } - } // Cache the items for preloading - usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure + used = append(used, common.CopyBytes(key[:])) // Copy needed for closure } for _, key := range deletions { if err := tr.DeleteStorage(s.address, key[:]); err != nil { s.db.setError(err) return nil, err } - s.db.StorageDeleted += 1 - } - // If no slots were touched, issue a warning as we shouldn't have done all - // the above work in the first place - if len(usedStorage) == 0 { - log.Error("State object update was noop", "addr", s.address, "slots", len(s.pendingStorage)) + s.db.StorageDeleted.Add(1) } if s.db.prefetcher != nil { - s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) + s.db.prefetcher.used(s.addrHash, s.data.Root, used) } - s.pendingStorage = make(Storage) // reset pending map + s.uncommittedStorage = make(Storage) // empties the commit markers return tr, nil } @@ -400,27 +396,79 @@ func (s *stateObject) updateRoot() { s.data.Root = tr.Hash() } -// commit obtains a set of dirty storage trie nodes and updates the account data. -// The returned set can be nil if nothing to commit. This function assumes all -// storage mutations have already been flushed into trie by updateRoot. -func (s *stateObject) commit() (*trienode.NodeSet, error) { - // Short circuit if trie is not even loaded, don't bother with committing anything - if s.trie == nil { +// commitStorage overwrites the clean storage with the storage changes and +// fulfills the storage diffs into the given accountUpdate struct. 
+func (s *stateObject) commitStorage(op *accountUpdate) { + var ( + buf = crypto.NewKeccakState() + encode = func(val common.Hash) []byte { + if val == (common.Hash{}) { + return nil + } + blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:])) + return blob + } + ) + for key, val := range s.pendingStorage { + // Skip the noop storage changes, it might be possible the value + // of tracked slot is same in originStorage and pendingStorage + // map, e.g. the storage slot is modified in tx_a and then reset + // back in tx_b. + if val == s.originStorage[key] { + continue + } + hash := crypto.HashData(buf, key[:]) + if op.storages == nil { + op.storages = make(map[common.Hash][]byte) + } + op.storages[hash] = encode(val) + if op.storagesOrigin == nil { + op.storagesOrigin = make(map[common.Hash][]byte) + } + op.storagesOrigin[hash] = encode(s.originStorage[key]) + + // Overwrite the clean value of storage slots + s.originStorage[key] = val + } + s.pendingStorage = make(Storage) +} + +// commit obtains the account changes (metadata, storage slots, code) caused by +// state execution along with the dirty storage trie nodes. +// +// Note, commit may run concurrently across all the state objects. Do not assume +// thread-safe access to the statedb. 
+func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) { + // commit the account metadata changes + op := &accountUpdate{ + address: s.address, + data: types.SlimAccountRLP(s.data), + } + if s.origin != nil { + op.origin = types.SlimAccountRLP(*s.origin) + } + // commit the contract code if it's modified + if s.dirtyCode { + op.code = &contractCode{ + hash: common.BytesToHash(s.CodeHash()), + blob: s.code, + } + s.dirtyCode = false // reset the dirty flag + } + // Commit storage changes and the associated storage trie + s.commitStorage(op) + if len(op.storages) == 0 { + // nothing changed, don't bother to commit the trie s.origin = s.data.Copy() - return nil, nil + return op, nil, nil } - // The trie is currently in an open state and could potentially contain - // cached mutations. Call commit to acquire a set of nodes that have been - // modified, the set can be nil if nothing to commit. root, nodes, err := s.trie.Commit(false) if err != nil { - return nil, err + return nil, nil, err } s.data.Root = root - - // Update original account data after commit s.origin = s.data.Copy() - return nodes, nil + return op, nodes, nil } // AddBalance adds amount to s's balance. 
@@ -463,18 +511,19 @@ func (s *stateObject) setBalance(amount *uint256.Int) { func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, - code: s.code, - originStorage: s.originStorage.Copy(), - pendingStorage: s.pendingStorage.Copy(), - dirtyStorage: s.dirtyStorage.Copy(), - dirtyCode: s.dirtyCode, - selfDestructed: s.selfDestructed, - newContract: s.newContract, + db: db, + address: s.address, + addrHash: s.addrHash, + origin: s.origin, + data: s.data, + code: s.code, + originStorage: s.originStorage.Copy(), + pendingStorage: s.pendingStorage.Copy(), + dirtyStorage: s.dirtyStorage.Copy(), + uncommittedStorage: s.uncommittedStorage.Copy(), + dirtyCode: s.dirtyCode, + selfDestructed: s.selfDestructed, + newContract: s.newContract, } if s.trie != nil { obj.trie = db.db.CopyTrie(s.trie) diff --git a/core/state/statedb.go b/core/state/statedb.go index 6d9cc907e03f..659c500c3bf1 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,11 +18,14 @@ package state import ( + "errors" "fmt" "maps" "math/big" "slices" "sort" + "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -36,9 +39,14 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" + "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" + "golang.org/x/sync/errgroup" ) +// TriesInMemory represents the number of layers that are kept in RAM. +const TriesInMemory = 128 + type revision struct { id int journalIndex int @@ -88,13 +96,6 @@ type StateDB struct { // It will be updated when the Commit is called. originalRoot common.Hash - // These maps hold the state changes (including the corresponding - // original value) that occurred in this **block**. 
- accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding - storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding - storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format - // This map holds 'live' objects, which will get modified while // processing a state transition. stateObjects map[common.Address]*stateObject @@ -159,12 +160,9 @@ type StateDB struct { TrieDBCommits time.Duration AccountUpdated int - StorageUpdated int + StorageUpdated atomic.Int64 AccountDeleted int - StorageDeleted int - - // Testing hooks - onCommit func(states *triestate.Set) // Hook invoked when commit is performed + StorageDeleted atomic.Int64 } // New creates a new state from a given trie. @@ -178,10 +176,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) trie: tr, originalRoot: root, snaps: snaps, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject), stateObjectsDestruct: make(map[common.Address]*types.StateAccount), mutations: make(map[common.Address]*mutation), @@ -208,7 +202,8 @@ func (s *StateDB) SetLogger(l *tracing.Hooks) { // commit phase, most of the needed data is already hot. func (s *StateDB) StartPrefetcher(namespace string) { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate(false) + s.prefetcher.report() s.prefetcher = nil } if s.snap != nil { @@ -220,7 +215,8 @@ func (s *StateDB) StartPrefetcher(namespace string) { // from the gathered metrics. 
func (s *StateDB) StopPrefetcher() { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate(false) + s.prefetcher.report() s.prefetcher = nil } } @@ -340,7 +336,7 @@ func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { return common.Hash{} } -// TxIndex returns the current transaction index set by Prepare. +// TxIndex returns the current transaction index set by SetTxContext. func (s *StateDB) TxIndex() int { return s.txIndex } @@ -369,7 +365,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { return common.Hash{} } -// GetState retrieves a value from the given account's storage trie. +// GetState retrieves the value associated with the specific key. func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -378,7 +374,8 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { return common.Hash{} } -// GetCommittedState retrieves a value from the given account's committed storage trie. +// GetCommittedState retrieves the value associated with the specific key +// without any mutations caused in the current execution. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -538,9 +535,6 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common // updateStateObject writes the given object to the trie. 
func (s *StateDB) updateStateObject(obj *stateObject) { - // Track the amount of time wasted on updating the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - // Encode the account and update the account trie addr := obj.Address() if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { @@ -549,30 +543,10 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if obj.dirtyCode { s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) } - // Cache the data until commit. Note, this update mechanism is not symmetric - // to the deletion, because whereas it is enough to track account updates - // at commit time, deletions need tracking at transaction boundary level to - // ensure we capture state clearing. - s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - - // Track the original value of mutated account, nil means it was not present. - // Skip if it has been tracked (because updateStateObject may be called - // multiple times in a block). - if _, ok := s.accountsOrigin[obj.address]; !ok { - if obj.origin == nil { - s.accountsOrigin[obj.address] = nil - } else { - s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin) - } - } } // deleteStateObject removes the given object from the state trie. 
func (s *StateDB) deleteStateObject(addr common.Address) { - // Track the amount of time wasted on deleting the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - - // Delete the account from the trie if err := s.trie.DeleteAccount(addr); err != nil { s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } @@ -687,10 +661,6 @@ func (s *StateDB) Copy() *StateDB { trie: s.db.CopyTrie(s.trie), hasher: crypto.NewKeccakState(), originalRoot: s.originalRoot, - accounts: copySet(s.accounts), - storages: copy2DSet(s.storages), - accountsOrigin: copySet(s.accountsOrigin), - storagesOrigin: copy2DSet(s.storagesOrigin), stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)), stateObjectsDestruct: maps.Clone(s.stateObjectsDestruct), mutations: make(map[common.Address]*mutation, len(s.mutations)), @@ -737,13 +707,6 @@ func (s *StateDB) Copy() *StateDB { // in the middle of a transaction. state.accessList = s.accessList.Copy() state.transientStorage = s.transientStorage.Copy() - - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } return state } @@ -806,15 +769,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { if _, ok := s.stateObjectsDestruct[obj.address]; !ok { s.stateObjectsDestruct[obj.address] = obj.origin } - // Note, we can't do this only at the end of a block because multiple - // transactions within the same block might self destruct and then - // resurrect an account; but the snapshotter needs both events. 
- delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { - obj.finalise(true) // Prefetch slots in the background + obj.finalise() s.markUpdate(addr) } // At this point, also ship the address off to the precacher. The precacher @@ -823,7 +779,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch); err != nil { + log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err) + } } // Invalidate journal because reverting across transactions is not allowed. s.clearJournalAndRefund() @@ -836,42 +794,52 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - // If there was a trie prefetcher operating, it gets aborted and irrevocably - // modified after we start retrieving tries. Remove it from the statedb after - // this round of use. - // - // This is weird pre-byzantium since the first tx runs with a prefetcher and - // the remainder without, but pre-byzantium even the initial prefetcher is - // useless, so no sleep lost. 
- prefetcher := s.prefetcher + // If there was a trie prefetcher operating, terminate it async so that the + // individual storage tries can be updated as soon as the disk load finishes. if s.prefetcher != nil { + s.prefetcher.terminate(true) defer func() { - s.prefetcher.close() - s.prefetcher = nil + s.prefetcher.report() + s.prefetcher = nil // Pre-byzantium, unset any used up prefetcher }() } - // Although naively it makes sense to retrieve the account trie and then do - // the contract storage and account updates sequentially, that short circuits - // the account prefetcher. Instead, let's process all the storage updates - // first, giving the account prefetches just a few more milliseconds of time - // to pull useful data from disk. - start := time.Now() + // Process all storage updates concurrently. The state object update root + // method will internally call a blocking trie fetch from the prefetcher, + // so there's no need to explicitly wait for the prefetchers to finish. + var ( + start = time.Now() + workers errgroup.Group + ) + if s.db.TrieDB().IsVerkle() { + // Whilst MPT storage tries are independent, Verkle has one single trie + // for all the accounts and all the storage slots merged together. The + // former can thus be simply parallelized, but updating the latter will + // need concurrency support within the trie itself. That's a TODO for a + // later time. + workers.SetLimit(1) + } for addr, op := range s.mutations { - if op.applied { - continue - } - if op.isDelete() { + if op.applied || op.isDelete() { continue } - s.stateObjects[addr].updateRoot() + obj := s.stateObjects[addr] // closure for the task runner below + workers.Go(func() error { + obj.updateRoot() + return nil + }) } + workers.Wait() s.StorageUpdates += time.Since(start) // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. 
We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. - if prefetcher != nil { - if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil { + start = time.Now() + + if s.prefetcher != nil { + if trie := s.prefetcher.trie(common.Hash{}, s.originalRoot); trie == nil { + log.Error("Failed to retrieve account pre-fetcher trie") + } else { s.trie = trie } } @@ -907,8 +875,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.deleteStateObject(deletedAddr) s.AccountDeleted += 1 } - if prefetcher != nil { - prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) + s.AccountUpdates += time.Since(start) + + if s.prefetcher != nil { + s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } // Track the amount of time wasted on hashing the account trie defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) @@ -1009,10 +979,9 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r } // deleteStorage is designed to delete the storage trie of a designated account. -// It could potentially be terminated if the storage size is excessively large, -// potentially leading to an out-of-memory panic. The function will make an attempt -// to utilize an efficient strategy if the associated state snapshot is reachable; -// otherwise, it will resort to a less-efficient approach. +// The function will make an attempt to utilize an efficient strategy if the +// associated state snapshot is reachable; otherwise, it will resort to a less +// efficient approach. 
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) { var ( start = time.Now() @@ -1047,75 +1016,61 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root } // handleDestruction processes all destruction markers and deletes the account -// and associated storage slots if necessary. There are four possible situations -// here: -// -// - the account was not existent and be marked as destructed +// and associated storage slots if necessary. There are four potential scenarios +// as following: // -// - the account was not existent and be marked as destructed, -// however, it's resurrected later in the same block. -// -// - the account was existent and be marked as destructed -// -// - the account was existent and be marked as destructed, -// however it's resurrected later in the same block. +// (a) the account was not existent and be marked as destructed +// (b) the account was not existent and be marked as destructed, +// however, it's resurrected later in the same block. +// (c) the account was existent and be marked as destructed +// (d) the account was existent and be marked as destructed, +// however it's resurrected later in the same block. // // In case (a), nothing needs be deleted, nil to nil transition can be ignored. -// // In case (b), nothing needs be deleted, nil is used as the original value for // newly created account and storages -// // In case (c), **original** account along with its storages should be deleted, // with their values be tracked as original value. -// // In case (d), **original** account along with its storages should be deleted, // with their values be tracked as original value. -func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) error { - // Short circuit if geth is running with hash mode. 
This procedure can consume - // considerable time and storage deletion isn't supported in hash mode, thus - // preemptively avoiding unnecessary expenses. - if s.db.TrieDB().Scheme() == rawdb.HashScheme { - return nil - } +func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) { + var ( + nodes []*trienode.NodeSet + buf = crypto.NewKeccakState() + deletes = make(map[common.Hash]*accountDelete) + ) for addr, prev := range s.stateObjectsDestruct { - // The original account was non-existing, and it's marked as destructed - // in the scope of block. It can be case (a) or (b). - // - for (a), skip it without doing anything. - // - for (b), track account's original value as nil. It may overwrite - // the data cached in s.accountsOrigin set by 'updateStateObject'. - addrHash := crypto.Keccak256Hash(addr[:]) + // The account was non-existent, and it's marked as destructed in the scope + // of block. It can be either case (a) or (b) and will be interpreted as + // null->null state transition. + // - for (a), skip it without doing anything + // - for (b), the resurrected account with nil as original will be handled afterwards if prev == nil { - if _, ok := s.accounts[addrHash]; ok { - s.accountsOrigin[addr] = nil // case (b) - } continue } - // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'. - s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d) + // The account was existent, it can be either case (c) or (d). + addrHash := crypto.HashData(buf, addr.Bytes()) + op := &accountDelete{ + address: addr, + origin: types.SlimAccountRLP(*prev), + } + deletes[addrHash] = op - // Short circuit if the storage was empty. + // Short circuit if the origin storage was empty. if prev.Root == types.EmptyRootHash { continue } - // Remove storage slots belong to the account. + // Remove storage slots belonging to the account. 
slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) if err != nil { - return fmt.Errorf("failed to delete storage, err: %w", err) - } - if s.storagesOrigin[addr] == nil { - s.storagesOrigin[addr] = slots - } else { - // It can overwrite the data in s.storagesOrigin[addrHash] set by - // 'object.updateTrie'. - for key, val := range slots { - s.storagesOrigin[addr][key] = val - } - } - if err := nodes.Merge(set); err != nil { - return err + return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err) } + op.storagesOrigin = slots + + // Aggregate the associated trie node changes. + nodes = append(nodes, set) } - return nil + return deletes, nodes, nil } // GetTrie returns the account trie. @@ -1123,18 +1078,12 @@ func (s *StateDB) GetTrie() Trie { return s.trie } -// Commit writes the state to the underlying in-memory trie database. -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. -// -// The associated block number of the state transition is also provided -// for more chain context. -func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { +// commit gathers the state mutations accumulated along with the associated +// trie changes, resetting all internal flags with the new state as the base. +func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) { // Short circuit in case any database failure occurred earlier. 
if s.dbErr != nil { - return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) + return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) } // Finalize any pending changes and merge everything into the tries s.IntermediateRoot(deleteEmptyObjects) @@ -1145,125 +1094,203 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er accountTrieNodesDeleted int storageTrieNodesUpdated int storageTrieNodesDeleted int - nodes = trienode.NewMergedNodeSet() - codeWriter = s.db.DiskDB().NewBatch() + + lock sync.Mutex // protect two maps below + nodes = trienode.NewMergedNodeSet() // aggregated trie nodes + updates = make(map[common.Hash]*accountUpdate, len(s.mutations)) // aggregated account updates + + // merge aggregates the dirty trie nodes into the global set. + // + // Given that some accounts may be destroyed and then recreated within + // the same block, it's possible that a node set with the same owner + // may already exists. In such cases, these two sets are combined, with + // the later one overwriting the previous one if any nodes are modified + // or deleted in both sets. + // + // merge run concurrently across all the state objects and account trie. + merge = func(set *trienode.NodeSet) error { + if set == nil { + return nil + } + lock.Lock() + defer lock.Unlock() + + updates, deletes := set.Size() + if set.Owner == (common.Hash{}) { + accountTrieNodesUpdated += updates + accountTrieNodesDeleted += deletes + } else { + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deletes + } + return nodes.Merge(set) + } ) - // Handle all state deletions first - if err := s.handleDestruction(nodes); err != nil { - return common.Hash{}, err + // Given that some accounts could be destroyed and then recreated within + // the same block, account deletions must be processed first. 
This ensures + // that the storage trie nodes deleted during destruction and recreated + // during subsequent resurrection can be combined correctly. + deletes, delNodes, err := s.handleDestruction() + if err != nil { + return nil, err + } + for _, set := range delNodes { + if err := merge(set); err != nil { + return nil, err + } } - // Handle all state updates afterwards - start := time.Now() + // Handle all state updates afterwards, concurrently to one another to shave + // off some milliseconds from the commit operation. Also accumulate the code + // writes to run in parallel with the computations. + var ( + start = time.Now() + root common.Hash + workers errgroup.Group + ) + // Schedule the account trie first since that will be the biggest, so give + // it the most time to crunch. + // + // TODO(karalabe): This account trie commit is *very* heavy. 5-6ms at chain + // heads, which seems excessive given that it doesn't do hashing, it just + // shuffles some data. For comparison, the *hashing* at chain head is 2-3ms. + // We need to investigate what's happening as it seems something's wonky. + // Obviously it's not an end of the world issue, just something the original + // code didn't anticipate for. + workers.Go(func() error { + // Write the account trie changes, measuring the amount of wasted time + newroot, set, err := s.trie.Commit(true) + if err != nil { + return err + } + root = newroot + + if err := merge(set); err != nil { + return err + } + s.AccountCommits = time.Since(start) + return nil + }) + // Schedule each of the storage tries that need to be updated, so they can + // run concurrently to one another. + // + // TODO(karalabe): Experimentally, the account commit takes approximately the + // same time as all the storage commits combined, so we could maybe only have + // 2 threads in total. But that kind of depends on the account commit being + // more expensive than it should be, so let's fix that and revisit this todo. 
for addr, op := range s.mutations { if op.isDelete() { continue } - obj := s.stateObjects[addr] - // Write any contract code associated with the state object - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false - } - // Write any storage changes in the state object to its storage trie - set, err := obj.commit() - if err != nil { - return common.Hash{}, err + obj := s.stateObjects[addr] + if obj == nil { + return nil, errors.New("missing state object") } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err + // Run the storage updates concurrently to one another + workers.Go(func() error { + // Write any storage changes in the state object to its storage trie + update, set, err := obj.commit() + if err != nil { + return err } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted - } - } - s.StorageCommits += time.Since(start) - - if codeWriter.ValueSize() > 0 { - if err := codeWriter.Write(); err != nil { - log.Crit("Failed to commit dirty codes", "error", err) - } - } - // Write the account trie changes, measuring the amount of wasted time - start = time.Now() - - root, set, err := s.trie.Commit(true) - if err != nil { - return common.Hash{}, err + if err := merge(set); err != nil { + return err + } + lock.Lock() + updates[obj.addrHash] = update + lock.Unlock() + s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime + return nil + }) } - // Merge the dirty nodes of account trie into global set - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err - } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() + 
// Wait for everything to finish and update the metrics + if err := workers.Wait(); err != nil { + return nil, err } - // Report the commit metrics - s.AccountCommits += time.Since(start) - accountUpdatedMeter.Mark(int64(s.AccountUpdated)) - storageUpdatedMeter.Mark(int64(s.StorageUpdated)) + storageUpdatedMeter.Mark(s.StorageUpdated.Load()) accountDeletedMeter.Mark(int64(s.AccountDeleted)) - storageDeletedMeter.Mark(int64(s.StorageDeleted)) + storageDeletedMeter.Mark(s.StorageDeleted.Load()) accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) s.AccountUpdated, s.AccountDeleted = 0, 0 - s.StorageUpdated, s.StorageDeleted = 0, 0 + s.StorageUpdated.Store(0) + s.StorageDeleted.Store(0) - // If snapshotting is enabled, update the snapshot tree with this new version - if s.snap != nil { - start = time.Now() - // Only update if there's a state transition (skip empty Clique blocks) - if parent := s.snap.Root(); parent != root { - if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { - log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) + // Clear all internal flags and update state root at the end. + s.mutations = make(map[common.Address]*mutation) + s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) + + origin := s.originalRoot + s.originalRoot = root + return newStateUpdate(origin, root, deletes, updates, nodes), nil +} + +// commitAndFlush is a wrapper of commit which also commits the state mutations +// to the configured data stores. 
+func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) { + ret, err := s.commit(deleteEmptyObjects) + if err != nil { + return nil, err + } + // Commit dirty contract code if any exists + if db := s.db.DiskDB(); db != nil && len(ret.codes) > 0 { + batch := db.NewBatch() + for _, code := range ret.codes { + rawdb.WriteCode(batch, code.hash, code.blob) + } + if err := batch.Write(); err != nil { + return nil, err + } + } + if !ret.empty() { + // If snapshotting is enabled, update the snapshot tree with this new version + if s.snap != nil { + s.snap = nil + + start := time.Now() + if err := s.snaps.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil { + log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err) } // Keep 128 diff layers in the memory, persistent layer is 129th. // - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, 128); err != nil { - log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err) + if err := s.snaps.Cap(ret.root, TriesInMemory); err != nil { + log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", TriesInMemory, "err", err) } + s.SnapshotCommits += time.Since(start) } - s.SnapshotCommits += time.Since(start) - s.snap = nil - } - if root == (common.Hash{}) { - root = types.EmptyRootHash - } - origin := s.originalRoot - if origin == (common.Hash{}) { - origin = types.EmptyRootHash - } - if root != origin { - start = time.Now() - set := triestate.New(s.accountsOrigin, s.storagesOrigin) - if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { - return common.Hash{}, err + // If trie database is enabled, commit the state update as a new layer + if db := s.db.TrieDB(); db != nil { + start := time.Now() + set := 
triestate.New(ret.accountsOrigin, ret.storagesOrigin) + if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, set); err != nil { + return nil, err + } + s.TrieDBCommits += time.Since(start) } - s.originalRoot = root - s.TrieDBCommits += time.Since(start) + } + return ret, err +} - if s.onCommit != nil { - s.onCommit(set) - } +// Commit writes the state mutations into the configured data stores. +// +// Once the state is committed, tries cached in stateDB (including account +// trie, storage tries) will no longer be functional. A new state instance +// must be created with new root and updated database for accessing post- +// commit states. +// +// The associated block number of the state transition is also provided +// for more chain context. +func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { + ret, err := s.commitAndFlush(block, deleteEmptyObjects) + if err != nil { + return common.Hash{}, err } - // Clear all internal flags at the end of commit operation. - s.accounts = make(map[common.Hash][]byte) - s.storages = make(map[common.Hash]map[common.Hash][]byte) - s.accountsOrigin = make(map[common.Address][]byte) - s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.mutations = make(map[common.Address]*mutation) - s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - return root, nil + return ret.root, nil } // Prepare handles the preparatory steps for executing a state transition with. 
@@ -1280,7 +1307,10 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - Add coinbase to access list (EIP-3651) // - Reset transient storage (EIP-1153) func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - if rules.IsBerlin { + if rules.IsEIP2929 && rules.IsEIP4762 { + panic("eip2929 and eip4762 are both activated") + } + if rules.IsEIP2929 { // Clear out any leftover from previous executions al := newAccessList() s.accessList = al @@ -1342,41 +1372,9 @@ func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addre return s.accessList.Contains(addr, slot) } -// convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { - ret := make(map[common.Hash]struct{}, len(set)) - for addr := range set { - obj, exist := s.stateObjects[addr] - if !exist { - ret[crypto.Keccak256Hash(addr[:])] = struct{}{} - } else { - ret[obj.addrHash] = struct{}{} - } - } - return ret -} - -// copySet returns a deep-copied set. -func copySet[k comparable](set map[k][]byte) map[k][]byte { - copied := make(map[k][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied -} - -// copy2DSet returns a two-dimensional deep-copied set. -func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { - copied := make(map[k]map[common.Hash][]byte, len(set)) - for addr, subset := range set { - copied[addr] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addr][key] = common.CopyBytes(val) - } - } - return copied -} - +// markDelete is invoked when an account is deleted but the deletion is +// not yet committed. 
The pending mutation is cached and will be applied +// all together func (s *StateDB) markDelete(addr common.Address) { if _, ok := s.mutations[addr]; !ok { s.mutations[addr] = &mutation{} @@ -1392,3 +1390,7 @@ func (s *StateDB) markUpdate(addr common.Address) { s.mutations[addr].applied = false s.mutations[addr].typ = update } + +func (s *StateDB) PointCache() *utils.PointCache { + return s.db.PointCache() +} diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index 6317681a7fba..40b079cd8a43 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -36,7 +36,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/triestate" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/pathdb" "github.com/holiman/uint256" @@ -180,9 +179,21 @@ func (test *stateTest) run() bool { roots []common.Hash accountList []map[common.Address][]byte storageList []map[common.Address]map[common.Hash][]byte - onCommit = func(states *triestate.Set) { - accountList = append(accountList, copySet(states.Accounts)) - storageList = append(storageList, copy2DSet(states.Storages)) + copyUpdate = func(update *stateUpdate) { + accounts := make(map[common.Address][]byte, len(update.accountsOrigin)) + for key, val := range update.accountsOrigin { + accounts[key] = common.CopyBytes(val) + } + accountList = append(accountList, accounts) + + storages := make(map[common.Address]map[common.Hash][]byte, len(update.storagesOrigin)) + for addr, subset := range update.storagesOrigin { + storages[addr] = make(map[common.Hash][]byte, len(subset)) + for key, val := range subset { + storages[addr][key] = common.CopyBytes(val) + } + } + storageList = append(storageList, storages) } disk = rawdb.NewMemoryDatabase() tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults}) @@ -210,8 +221,6 @@ func (test 
*stateTest) run() bool { if err != nil { panic(err) } - state.onCommit = onCommit - for i, action := range actions { if i%test.chunk == 0 && i != 0 { if byzantium { @@ -227,14 +236,15 @@ func (test *stateTest) run() bool { } else { state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary } - nroot, err := state.Commit(0, true) // call commit at the block boundary + ret, err := state.commitAndFlush(0, true) // call commit at the block boundary if err != nil { panic(err) } - if nroot == root { - return true // filter out non-change state transition + if ret.empty() { + return true } - roots = append(roots, nroot) + copyUpdate(ret) + roots = append(roots, ret.root) } for i := 0; i < len(test.actions); i++ { root := types.EmptyRootHash diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 71d64f562898..2ce2b868faa9 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -1329,3 +1329,47 @@ func TestDeleteStorage(t *testing.T) { t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) } } + +func TestStorageDirtiness(t *testing.T) { + var ( + disk = rawdb.NewMemoryDatabase() + tdb = triedb.NewDatabase(disk, nil) + db = NewDatabaseWithNodeDB(disk, tdb) + state, _ = New(types.EmptyRootHash, db, nil) + addr = common.HexToAddress("0x1") + checkDirty = func(key common.Hash, value common.Hash, dirty bool) { + obj := state.getStateObject(addr) + v, exist := obj.dirtyStorage[key] + if exist != dirty { + t.Fatalf("Unexpected dirty marker, want: %t, got: %t", dirty, exist) + } + if v != value { + t.Fatalf("Unexpected storage slot, want: %t, got: %t", value, v) + } + } + ) + state.CreateAccount(addr) + + // the storage change is noop, no dirty marker + state.SetState(addr, common.Hash{0x1}, common.Hash{}) + checkDirty(common.Hash{0x1}, common.Hash{}, false) + + // the storage change is valid, dirty marker is expected + snap := state.Snapshot() + state.SetState(addr, common.Hash{0x1}, common.Hash{0x1}) + 
checkDirty(common.Hash{0x1}, common.Hash{0x1}, true)
+
+	// the storage change is reverted, dirtiness should be revoked
+	state.RevertToSnapshot(snap)
+	checkDirty(common.Hash{0x1}, common.Hash{}, false)
+
+	// the storage is reset back to its original value, dirtiness should be revoked
+	state.SetState(addr, common.Hash{0x1}, common.Hash{0x1})
+	snap = state.Snapshot()
+	state.SetState(addr, common.Hash{0x1}, common.Hash{})
+	checkDirty(common.Hash{0x1}, common.Hash{}, false)
+
+	// the storage change is reverted, dirty value should be set back
+	state.RevertToSnapshot(snap)
+	checkDirty(common.Hash{0x1}, common.Hash{0x1}, true)
+}
diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go
new file mode 100644
index 000000000000..f3e6af997e44
--- /dev/null
+++ b/core/state/stateupdate.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// contractCode represents a contract code with associated metadata.
+type contractCode struct {
+	hash common.Hash // hash is the cryptographic hash of the contract code.
+ blob []byte // blob is the binary representation of the contract code. +} + +// accountDelete represents an operation for deleting an Ethereum account. +type accountDelete struct { + address common.Address // address is the unique account identifier + origin []byte // origin is the original value of account data in slim-RLP encoding. + storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format. +} + +// accountUpdate represents an operation for updating an Ethereum account. +type accountUpdate struct { + address common.Address // address is the unique account identifier + data []byte // data is the slim-RLP encoded account data. + origin []byte // origin is the original value of account data in slim-RLP encoding. + code *contractCode // code represents mutated contract code; nil means it's not modified. + storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format. + storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format. +} + +// stateUpdate represents the difference between two states resulting from state +// execution. It contains information about mutated contract codes, accounts, +// and storage slots, along with their original values. 
+type stateUpdate struct { + originRoot common.Hash // hash of the state before applying mutation + root common.Hash // hash of the state after applying mutation + destructs map[common.Hash]struct{} // destructs contains the list of destructed accounts + accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding + accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding + storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format + storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format + codes map[common.Address]contractCode // codes contains the set of dirty codes + nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes +} + +// empty returns a flag indicating the state transition is empty or not. +func (sc *stateUpdate) empty() bool { + return sc.originRoot == sc.root +} + +// newStateUpdate constructs a state update object, representing the differences +// between two states by performing state execution. It aggregates the given +// account deletions and account updates to form a comprehensive state update. +func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate { + var ( + destructs = make(map[common.Hash]struct{}) + accounts = make(map[common.Hash][]byte) + accountsOrigin = make(map[common.Address][]byte) + storages = make(map[common.Hash]map[common.Hash][]byte) + storagesOrigin = make(map[common.Address]map[common.Hash][]byte) + codes = make(map[common.Address]contractCode) + ) + // Due to the fact that some accounts could be destructed and resurrected + // within the same block, the deletions must be aggregated first. 
+ for addrHash, op := range deletes { + addr := op.address + destructs[addrHash] = struct{}{} + accountsOrigin[addr] = op.origin + if len(op.storagesOrigin) > 0 { + storagesOrigin[addr] = op.storagesOrigin + } + } + // Aggregate account updates then. + for addrHash, op := range updates { + // Aggregate dirty contract codes if they are available. + addr := op.address + if op.code != nil { + codes[addr] = *op.code + } + // Aggregate the account changes. The original account value will only + // be tracked if it's not present yet. + accounts[addrHash] = op.data + if _, found := accountsOrigin[addr]; !found { + accountsOrigin[addr] = op.origin + } + // Aggregate the storage changes. The original storage slot value will + // only be tracked if it's not present yet. + if len(op.storages) > 0 { + storages[addrHash] = op.storages + } + if len(op.storagesOrigin) > 0 { + origin := storagesOrigin[addr] + if origin == nil { + storagesOrigin[addr] = op.storagesOrigin + continue + } + for key, slot := range op.storagesOrigin { + if _, found := origin[key]; !found { + origin[key] = slot + } + } + storagesOrigin[addr] = origin + } + } + return &stateUpdate{ + originRoot: types.TrieRootHash(originRoot), + root: types.TrieRootHash(root), + destructs: destructs, + accounts: accounts, + accountsOrigin: accountsOrigin, + storages: storages, + storagesOrigin: storagesOrigin, + codes: codes, + nodes: nodes, + } +} diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index c2a49417d458..ce94ab51396c 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -17,6 +17,7 @@ package state import ( + "errors" "sync" "github.com/ethereum/go-ethereum/common" @@ -27,6 +28,10 @@ import ( var ( // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. triePrefetchMetricsPrefix = "trie/prefetch/" + + // errTerminated is returned if a fetcher is attempted to be operated after it + // has already terminated. 
+ errTerminated = errors.New("fetcher is already terminated") ) // triePrefetcher is an active prefetcher, which receives accounts or storage @@ -37,160 +42,126 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. fetchers map[string]*subfetcher // Subfetchers for each trie + term chan struct{} // Channel to signal interruption deliveryMissMeter metrics.Meter accountLoadMeter metrics.Meter accountDupMeter metrics.Meter - accountSkipMeter metrics.Meter accountWasteMeter metrics.Meter storageLoadMeter metrics.Meter storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter storageWasteMeter metrics.Meter } func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { prefix := triePrefetchMetricsPrefix + namespace - p := &triePrefetcher{ + return &triePrefetcher{ db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map + term: make(chan struct{}), deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), - accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), } - return p } -// close iterates over all the subfetchers, aborts any that were left spinning -// and reports the stats to the metrics subsystem. 
-func (p *triePrefetcher) close() {
+// terminate iterates over all the subfetchers and issues a termination request
+// to all of them. Depending on the async parameter, the method will either block
+// until all subfetchers spin down, or return immediately.
+func (p *triePrefetcher) terminate(async bool) {
+	// Short circuit if the fetcher is already closed
+	select {
+	case <-p.term:
+		return
+	default:
+	}
+	// Terminate all sub-fetchers, sync or async, depending on the request
 	for _, fetcher := range p.fetchers {
-		fetcher.abort() // safe to do multiple times
-
-		if metrics.Enabled {
-			if fetcher.root == p.root {
-				p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
-				p.accountDupMeter.Mark(int64(fetcher.dups))
-				p.accountSkipMeter.Mark(int64(len(fetcher.tasks)))
-
-				for _, key := range fetcher.used {
-					delete(fetcher.seen, string(key))
-				}
-				p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
-			} else {
-				p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
-				p.storageDupMeter.Mark(int64(fetcher.dups))
-				p.storageSkipMeter.Mark(int64(len(fetcher.tasks)))
-
-				for _, key := range fetcher.used {
-					delete(fetcher.seen, string(key))
-				}
-				p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
-			}
-		}
+		fetcher.terminate(async)
 	}
-	// Clear out all fetchers (will crash on a second call, deliberate)
-	p.fetchers = nil
+	close(p.term)
 }
 
-// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
-// already loaded will be copied over, but no goroutines will be started. This
-// is mostly used in the miner which creates a copy of it's actively mutated
-// state to be sealed while it may further mutate the state.
-func (p *triePrefetcher) copy() *triePrefetcher { - copy := &triePrefetcher{ - db: p.db, - root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetches map - - deliveryMissMeter: p.deliveryMissMeter, - accountLoadMeter: p.accountLoadMeter, - accountDupMeter: p.accountDupMeter, - accountSkipMeter: p.accountSkipMeter, - accountWasteMeter: p.accountWasteMeter, - storageLoadMeter: p.storageLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, +// report aggregates the pre-fetching and usage metrics and reports them. +func (p *triePrefetcher) report() { + if !metrics.Enabled { + return } - // If the prefetcher is already a copy, duplicate the data - if p.fetches != nil { - for root, fetch := range p.fetches { - if fetch == nil { - continue + for _, fetcher := range p.fetchers { + fetcher.wait() // ensure the fetcher's idle before poking in its internals + + if fetcher.root == p.root { + p.accountLoadMeter.Mark(int64(len(fetcher.seen))) + p.accountDupMeter.Mark(int64(fetcher.dups)) + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) } - copy.fetches[root] = p.db.CopyTrie(fetch) + p.accountWasteMeter.Mark(int64(len(fetcher.seen))) + } else { + p.storageLoadMeter.Mark(int64(len(fetcher.seen))) + p.storageDupMeter.Mark(int64(fetcher.dups)) + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) + } + p.storageWasteMeter.Mark(int64(len(fetcher.seen))) } - return copy - } - // Otherwise we're copying an active fetcher, retrieve the current states - for id, fetcher := range p.fetchers { - copy.fetches[id] = fetcher.peek() } - return copy } -// prefetch schedules a batch of trie items to prefetch. 
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { - // If the prefetcher is an inactive one, bail out - if p.fetches != nil { - return +// prefetch schedules a batch of trie items to prefetch. After the prefetcher is +// closed, all the following tasks scheduled will not be executed and an error +// will be returned. +// +// prefetch is called from two locations: +// +// 1. Finalize of the state-objects storage roots. This happens at the end +// of every transaction, meaning that if several transactions touches +// upon the same contract, the parameters invoking this method may be +// repeated. +// 2. Finalize of the main account trie. This happens only once per block. +func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) error { + // Ensure the subfetcher is still alive + select { + case <-p.term: + return errTerminated + default: } - // Active fetcher, schedule the retrievals id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { fetcher = newSubfetcher(p.db, p.root, owner, root, addr) p.fetchers[id] = fetcher } - fetcher.schedule(keys) + return fetcher.schedule(keys) } -// trie returns the trie matching the root hash, or nil if the prefetcher doesn't -// have it. +// trie returns the trie matching the root hash, blocking until the fetcher of +// the given trie terminates. If no fetcher exists for the request, nil will be +// returned. 
func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { - // If the prefetcher is inactive, return from existing deep copies - id := p.trieID(owner, root) - if p.fetches != nil { - trie := p.fetches[id] - if trie == nil { - p.deliveryMissMeter.Mark(1) - return nil - } - return p.db.CopyTrie(trie) - } - // Otherwise the prefetcher is active, bail if no trie was prefetched for this root - fetcher := p.fetchers[id] + // Bail if no trie was prefetched for this root + fetcher := p.fetchers[p.trieID(owner, root)] if fetcher == nil { + log.Error("Prefetcher missed to load trie", "owner", owner, "root", root) p.deliveryMissMeter.Mark(1) return nil } - // Interrupt the prefetcher if it's by any chance still running and return - // a copy of any pre-loaded trie. - fetcher.abort() // safe to do multiple times - - trie := fetcher.peek() - if trie == nil { - p.deliveryMissMeter.Mark(1) - return nil - } - return trie + // Subfetcher exists, retrieve its trie + return fetcher.peek() } // used marks a batch of state items used to allow creating statistics as to -// how useful or wasteful the prefetcher is. +// how useful or wasteful the fetcher is. 
func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { + fetcher.wait() // ensure the fetcher's idle before poking in its internals fetcher.used = used } } @@ -218,10 +189,9 @@ type subfetcher struct { tasks [][]byte // Items queued up for retrieval lock sync.Mutex // Lock protecting the task queue - wake chan struct{} // Wake channel if a new task is scheduled - stop chan struct{} // Channel to interrupt processing - term chan struct{} // Channel to signal interruption - copy chan chan Trie // Channel to request a copy of the current trie + wake chan struct{} // Wake channel if a new task is scheduled + stop chan struct{} // Channel to interrupt processing + term chan struct{} // Channel to signal interruption seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -240,7 +210,6 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo wake: make(chan struct{}, 1), stop: make(chan struct{}), term: make(chan struct{}), - copy: make(chan chan Trie), seen: make(map[string]struct{}), } go sf.loop() @@ -248,50 +217,61 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo } // schedule adds a batch of trie keys to the queue to prefetch. -func (sf *subfetcher) schedule(keys [][]byte) { +func (sf *subfetcher) schedule(keys [][]byte) error { + // Ensure the subfetcher is still alive + select { + case <-sf.term: + return errTerminated + default: + } // Append the tasks to the current queue sf.lock.Lock() sf.tasks = append(sf.tasks, keys...) 
sf.lock.Unlock()
 
-	// Notify the prefetcher, it's fine if it's already terminated
+	// Notify the background thread to execute scheduled tasks
 	select {
 	case sf.wake <- struct{}{}:
+		// Wake signal sent
 	default:
+		// Wake signal not sent as a previous one is already queued
 	}
+	return nil
 }
 
-// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
-// is currently.
-func (sf *subfetcher) peek() Trie {
-	ch := make(chan Trie)
-	select {
-	case sf.copy <- ch:
-		// Subfetcher still alive, return copy from it
-		return <-ch
+// wait blocks until the subfetcher terminates. This method is used to block on
+// an async termination before accessing internal fields from the fetcher.
+func (sf *subfetcher) wait() {
+	<-sf.term
+}
 
-	case <-sf.term:
-		// Subfetcher already terminated, return a copy directly
-		if sf.trie == nil {
-			return nil
-		}
-		return sf.db.CopyTrie(sf.trie)
-	}
+// peek retrieves the fetcher's trie, populated with any pre-fetched data. The
+// returned trie will be a shallow copy, so modifying it will break subsequent
+// peeks for the original data. The method will block until all the scheduled
+// data has been loaded and the fetcher terminated.
+func (sf *subfetcher) peek() Trie {
+	// Block until the fetcher terminates, then retrieve the trie
+	sf.wait()
+	return sf.trie
 }
 
-// abort interrupts the subfetcher immediately. It is safe to call abort multiple
-// times but it is not thread safe.
-func (sf *subfetcher) abort() {
+// terminate requests the subfetcher to stop accepting new tasks and spin down
+// as soon as everything is loaded. Depending on the async parameter, the method
+// will either block until all disk loads finish or return immediately.
+func (sf *subfetcher) terminate(async bool) { select { case <-sf.stop: default: close(sf.stop) } + if async { + return + } <-sf.term } -// loop waits for new tasks to be scheduled and keeps loading them until it runs -// out of tasks or its underlying trie is retrieved for committing. +// loop loads newly-scheduled trie tasks as they are received and loads them, stopping +// when requested. func (sf *subfetcher) loop() { // No matter how the loop stops, signal anyone waiting that it's terminated defer close(sf.term) @@ -305,8 +285,6 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - // The trie argument can be nil as verkle doesn't support prefetching - // yet. TODO FIX IT(rjl493456442), otherwise code will panic here. trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) @@ -318,48 +296,38 @@ func (sf *subfetcher) loop() { for { select { case <-sf.wake: - // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock + // Execute all remaining tasks in a single run sf.lock.Lock() tasks := sf.tasks sf.tasks = nil sf.lock.Unlock() - // Prefetch any tasks until the loop is interrupted - for i, task := range tasks { - select { - case <-sf.stop: - // If termination is requested, add any leftover back and return - sf.lock.Lock() - sf.tasks = append(sf.tasks, tasks[i:]...) 
- sf.lock.Unlock() - return - - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) - - default: - // No termination request yet, prefetch the next entry - if _, ok := sf.seen[string(task)]; ok { - sf.dups++ - } else { - if len(task) == common.AddressLength { - sf.trie.GetAccount(common.BytesToAddress(task)) - } else { - sf.trie.GetStorage(sf.addr, task) - } - sf.seen[string(task)] = struct{}{} - } + for _, task := range tasks { + if _, ok := sf.seen[string(task)]; ok { + sf.dups++ + continue + } + if len(task) == common.AddressLength { + sf.trie.GetAccount(common.BytesToAddress(task)) + } else { + sf.trie.GetStorage(sf.addr, task) } + sf.seen[string(task)] = struct{}{} } - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) - case <-sf.stop: - // Termination is requested, abort and leave remaining tasks - return + // Termination is requested, abort if no more tasks are pending. If + // there are some, exhaust them first. + sf.lock.Lock() + done := sf.tasks == nil + sf.lock.Unlock() + + if done { + return + } + // Some tasks are pending, loop and pick them up (that wake branch + // will be selected eventually, whilst stop remains closed to this + // branch will also run afterwards). 
} } } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index a616adf98f3a..478407dfbb04 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -19,7 +19,6 @@ package state import ( "math/big" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -46,68 +45,20 @@ func filledStateDB() *StateDB { return state } -func TestCopyAndClose(t *testing.T) { +func TestUseAfterTerminate(t *testing.T) { db := filledStateDB() prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - time.Sleep(1 * time.Second) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - b := prefetcher.trie(common.Hash{}, db.originalRoot) - cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - c := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - d := cpy2.trie(common.Hash{}, db.originalRoot) - cpy.close() - cpy2.close() - if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { - t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) - } -} -func TestUseAfterClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - a := 
prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - b := prefetcher.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b != nil { - t.Fatal("Trie after close should return nil") + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err != nil { + t.Errorf("Prefetch failed before terminate: %v", err) } -} + prefetcher.terminate(false) -func TestCopyClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy := prefetcher.copy() - a := prefetcher.trie(common.Hash{}, db.originalRoot) - b := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - c := prefetcher.trie(common.Hash{}, db.originalRoot) - d := cpy.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b == nil { - t.Fatal("Copy trie should return nil") - } - if c != nil { - t.Fatal("Trie after close should return nil") + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err == nil { + t.Errorf("Prefetch succeeded after terminate: %v", err) } - if d == nil { - t.Fatal("Copy trie should not return nil") + if tr := prefetcher.trie(common.Hash{}, db.originalRoot); tr == nil { + t.Errorf("Prefetcher returned nil trie after terminate") } } diff --git a/core/state_processor.go b/core/state_processor.go index abe6353f2c7f..ee6f5f77f939 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -197,6 +197,13 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo // ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root // contract. This method is exported to be used in tests. 
func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) { + if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallStart != nil { + vmenv.Config.Tracer.OnSystemCallStart() + } + if vmenv.Config.Tracer != nil && vmenv.Config.Tracer.OnSystemCallEnd != nil { + defer vmenv.Config.Tracer.OnSystemCallEnd() + } + // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with // the new root msg := &Message{ diff --git a/core/state_processor_test.go b/core/state_processor_test.go index dc9cb203bcec..af4d29b604da 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -417,10 +417,11 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr header.ParentBeaconRoot = &beaconRoot } // Assemble and return the final block for sealing + body := &types.Body{Transactions: txs} if config.IsShanghai(header.Number, header.Time) { - return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil)) + body.Withdrawals = []*types.Withdrawal{} } - return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) + return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)) } var ( @@ -481,7 +482,7 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.TxGas txCost2 := params.TxGas contractCreationCost := intrinsicContractCreationGas + uint64(2039 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(293644 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(57444 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, diff --git a/core/state_transition.go b/core/state_transition.go index d51833aea3a7..4fa2c1378642 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -68,7 +68,7 @@ func (result *ExecutionResult) Revert() 
[]byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. -func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool, isRIP7560InnerFrame ...bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isEIP3860 bool, isRIP7560InnerFrame ...bool) (uint64, error) { if isRIP7560InnerFrame != nil && len(isRIP7560InnerFrame) > 0 && isRIP7560InnerFrame[0] { return 0, nil } @@ -244,8 +244,9 @@ func (st *StateTransition) buyGas() error { if st.msg.GasFeeCap != nil { balanceCheck.SetUint64(st.msg.GasLimit) balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) - balanceCheck.Add(balanceCheck, st.msg.Value) } + balanceCheck.Add(balanceCheck, st.msg.Value) + if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if blobGas := st.blobGasUsed(); blobGas > 0 { // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap @@ -418,6 +419,14 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas + if rules.IsEIP4762 { + st.evm.AccessEvents.AddTxOrigin(msg.From) + + if targetAddr := msg.To; targetAddr != nil { + st.evm.AccessEvents.AddTxDestination(*targetAddr, msg.Value.Sign() != 0) + } + } + // Check clause 6 value, overflow := uint256.FromBig(msg.Value) if overflow { @@ -471,6 +480,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { fee := new(uint256.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTipU256) st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee) + + // add the coinbase to the witness iff the fee is greater than 0 + if rules.IsEIP4762 && fee.Sign() != 0 { + st.evm.AccessEvents.BalanceGas(st.evm.Context.Coinbase, true) + } } return &ExecutionResult{ diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md index 
77eda4ad7627..93b91cf479b5 100644 --- a/core/tracing/CHANGELOG.md +++ b/core/tracing/CHANGELOG.md @@ -4,6 +4,15 @@ All notable changes to the tracing interface will be documented in this file. ## [Unreleased] +There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for the future hardfork. + +### New methods + +- `OnSystemCallStart()`: This hook is called when EVM starts processing a system call. Note system calls happen outside the scope of a transaction. This event will be followed by normal EVM execution events. +- `OnSystemCallEnd()`: This hook is called when EVM finishes processing a system call. + +## [v1.14.0] + There has been a major breaking change in the tracing interface for custom native tracers. JS and built-in tracers are not affected by this change and tracing API methods may be used as before. This overhaul has been done as part of the new live tracing feature ([#29189](https://github.com/ethereum/go-ethereum/pull/29189)). To learn more about live tracing please refer to the [docs](https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing). **The `EVMLogger` interface which the tracers implemented has been removed.** It has been replaced by a new struct `tracing.Hooks`. `Hooks` keeps pointers to event listening functions. Internally the EVM will use these function pointers to emit events and can skip an event if the tracer has opted not to implement it. In fact this is the main reason for this change of approach. Another benefit is the ease of adding new hooks in future, and dynamically assigning event receivers. @@ -66,4 +75,5 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. 
These hooks signale - `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract. - `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above. -[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.13.14...master \ No newline at end of file +[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master +[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0 \ No newline at end of file diff --git a/core/tracing/gen_balance_change_reason_stringer.go b/core/tracing/gen_balance_change_reason_stringer.go new file mode 100644 index 000000000000..d3a515a12d37 --- /dev/null +++ b/core/tracing/gen_balance_change_reason_stringer.go @@ -0,0 +1,37 @@ +// Code generated by "stringer -type=BalanceChangeReason -output gen_balance_change_reason_stringer.go"; DO NOT EDIT. + +package tracing + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[BalanceChangeUnspecified-0] + _ = x[BalanceIncreaseRewardMineUncle-1] + _ = x[BalanceIncreaseRewardMineBlock-2] + _ = x[BalanceIncreaseWithdrawal-3] + _ = x[BalanceIncreaseGenesisBalance-4] + _ = x[BalanceIncreaseRewardTransactionFee-5] + _ = x[BalanceDecreaseGasBuy-6] + _ = x[BalanceIncreaseGasReturn-7] + _ = x[BalanceIncreaseDaoContract-8] + _ = x[BalanceDecreaseDaoAccount-9] + _ = x[BalanceChangeTransfer-10] + _ = x[BalanceChangeTouchAccount-11] + _ = x[BalanceIncreaseSelfdestruct-12] + _ = x[BalanceDecreaseSelfdestruct-13] + _ = x[BalanceDecreaseSelfdestructBurn-14] +} + +const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn" + +var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400} + +func (i BalanceChangeReason) String() string { + if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) { + return "BalanceChangeReason(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BalanceChangeReason_name[_BalanceChangeReason_index[i]:_BalanceChangeReason_index[i+1]] +} diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index 9ca6ee39fbe7..db058e847c0d 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -81,6 +81,10 @@ type ( TxEndHook = func(receipt *types.Receipt, err error) // EnterHook is invoked when the processing of a message starts. 
+ // + // Take note that EnterHook, when in the context of a live tracer, can be invoked + // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls, + // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information. EnterHook = func(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) // ExitHook is invoked when the processing of a message ends. @@ -89,6 +93,10 @@ type ( // ran out of gas when attempting to persist the code to database did not // count as a call failure and did not cause a revert of the call. This will // be indicated by `reverted == false` and `err == ErrCodeStoreOutOfGas`. + // + // Take note that ExitHook, when in the context of a live tracer, can be invoked + // outside of the `OnTxStart` and `OnTxEnd` hooks when dealing with system calls, + // see [OnSystemCallStartHook] and [OnSystemCallEndHook] for more information. ExitHook = func(depth int, output []byte, gasUsed uint64, err error, reverted bool) // OpcodeHook is invoked just prior to the execution of an opcode. @@ -125,6 +133,22 @@ type ( // GenesisBlockHook is called when the genesis block is being processed. GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc) + // OnSystemCallStartHook is called when a system call is about to be executed. Today, + // this hook is invoked when the EIP-4788 system call is about to be executed to set the + // beacon block root. + // + // After this hook, the EVM call tracing will happened as usual so you will receive a `OnEnter/OnExit` + // as well as state hooks between this hook and the `OnSystemCallEndHook`. + // + // Note that system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks + // will not be invoked. + OnSystemCallStartHook = func() + + // OnSystemCallEndHook is called when a system call has finished executing. 
Today, + // this hook is invoked when the EIP-4788 system call is about to be executed to set the + // beacon block root. + OnSystemCallEndHook = func() + /* - State events - */ @@ -155,12 +179,14 @@ type Hooks struct { OnFault FaultHook OnGasChange GasChangeHook // Chain events - OnBlockchainInit BlockchainInitHook - OnClose CloseHook - OnBlockStart BlockStartHook - OnBlockEnd BlockEndHook - OnSkippedBlock SkippedBlockHook - OnGenesisBlock GenesisBlockHook + OnBlockchainInit BlockchainInitHook + OnClose CloseHook + OnBlockStart BlockStartHook + OnBlockEnd BlockEndHook + OnSkippedBlock SkippedBlockHook + OnGenesisBlock GenesisBlockHook + OnSystemCallStart OnSystemCallStartHook + OnSystemCallEnd OnSystemCallEndHook // State events OnBalanceChange BalanceChangeHook OnNonceChange NonceChangeHook @@ -173,6 +199,8 @@ type Hooks struct { // for tracing and reporting. type BalanceChangeReason byte +//go:generate stringer -type=BalanceChangeReason -output gen_balance_change_reason_stringer.go + const ( BalanceChangeUnspecified BalanceChangeReason = 0 @@ -272,6 +300,12 @@ const ( GasChangeCallStorageColdAccess GasChangeReason = 13 // GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert. 
GasChangeCallFailedExecution GasChangeReason = 14 + // GasChangeWitnessContractInit is the amount charged for adding to the witness during the contract creation initialization step + GasChangeWitnessContractInit GasChangeReason = 15 + // GasChangeWitnessContractCreation is the amount charged for adding to the witness during the contract creation finalization step + GasChangeWitnessContractCreation GasChangeReason = 16 + // GasChangeWitnessCodeChunk is the amount charged for touching one or more contract code chunks + GasChangeWitnessCodeChunk GasChangeReason = 17 // GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as // it will be "manually" tracked by a direct emit of the gas change event. diff --git a/core/txindexer_test.go b/core/txindexer_test.go index 7b5ff1f206b2..0a606ed8fa6f 100644 --- a/core/txindexer_test.go +++ b/core/txindexer_test.go @@ -18,7 +18,6 @@ package core import ( "math/big" - "os" "testing" "github.com/ethereum/go-ethereum/common" @@ -211,8 +210,7 @@ func TestTxIndexer(t *testing.T) { }, } for _, c := range cases { - frdir := t.TempDir() - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false) + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) // Index the initial blocks from ancient store @@ -238,6 +236,5 @@ func TestTxIndexer(t *testing.T) { verify(db, 0, indexer) db.Close() - os.RemoveAll(frdir) } } diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 68d7b6f411fa..c86991c942da 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -87,7 +87,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - 
return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { diff --git a/core/types/block.go b/core/types/block.go index 53054f52d3b9..4857cd6e50c8 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -23,6 +23,7 @@ import ( "io" "math/big" "reflect" + "slices" "sync/atomic" "time" @@ -217,13 +218,19 @@ type extblock struct { // NewBlock creates a new block. The input data is copied, changes to header and to the // field values will not affect the block. // -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header -// are ignored and set to values derived from the given txs, uncles -// and receipts. -func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block { - b := &Block{header: CopyHeader(header)} +// The body elements and the receipts are used to recompute and overwrite the +// relevant portions of the header. +func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher) *Block { + if body == nil { + body = &Body{} + } + var ( + b = NewBlockWithHeader(header) + txs = body.Transactions + uncles = body.Uncles + withdrawals = body.Withdrawals + ) - // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { b.header.TxHash = EmptyTxsHash } else { @@ -249,27 +256,18 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* } } - return b -} - -// NewBlockWithWithdrawals creates a new block with withdrawals. The input data is copied, -// changes to header and to the field values will not affect the block. -// -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header are ignored and set to -// values derived from the given txs, uncles and receipts. 
-func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, hasher TrieHasher) *Block { - b := NewBlock(header, txs, uncles, receipts, hasher) - if withdrawals == nil { b.header.WithdrawalsHash = nil } else if len(withdrawals) == 0 { b.header.WithdrawalsHash = &EmptyWithdrawalsHash + b.withdrawals = Withdrawals{} } else { - h := DeriveSha(Withdrawals(withdrawals), hasher) - b.header.WithdrawalsHash = &h + hash := DeriveSha(Withdrawals(withdrawals), hasher) + b.header.WithdrawalsHash = &hash + b.withdrawals = slices.Clone(withdrawals) } - return b.WithWithdrawals(withdrawals) + return b } // CopyHeader creates a deep copy of a block header. @@ -453,31 +451,17 @@ func (b *Block) WithSeal(header *Header) *Block { } } -// WithBody returns a copy of the block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { - block := &Block{ - header: b.header, - transactions: make([]*Transaction, len(transactions)), - uncles: make([]*Header, len(uncles)), - withdrawals: b.withdrawals, - } - copy(block.transactions, transactions) - for i := range uncles { - block.uncles[i] = CopyHeader(uncles[i]) - } - return block -} - -// WithWithdrawals returns a copy of the block containing the given withdrawals. -func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { +// WithBody returns a new block with the original header and a deep copy of the +// provided body. 
+func (b *Block) WithBody(body Body) *Block { block := &Block{ header: b.header, - transactions: b.transactions, - uncles: b.uncles, + transactions: slices.Clone(body.Transactions), + uncles: make([]*Header, len(body.Uncles)), + withdrawals: slices.Clone(body.Withdrawals), } - if withdrawals != nil { - block.withdrawals = make([]*Withdrawal, len(withdrawals)) - copy(block.withdrawals, withdrawals) + for i := range body.Uncles { + block.uncles[i] = CopyHeader(body.Uncles[i]) } return block } diff --git a/core/types/block_test.go b/core/types/block_test.go index 982d002242f6..1af5b9d7bf2c 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -254,7 +254,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) + return NewBlock(header, &Body{Transactions: txs, Uncles: uncles}, receipts, blocktest.NewHasher()) } func TestRlpDecodeParentHash(t *testing.T) { diff --git a/core/types/transaction.go b/core/types/transaction.go index 996fee8e76be..b624538ec535 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -565,7 +565,7 @@ func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) { } } -// TxDifference returns a new set which is the difference between a and b. +// TxDifference returns a new set of transactions that are present in a but not in b. func TxDifference(a, b Transactions) Transactions { keep := make(Transactions, 0, len(a)) @@ -583,7 +583,7 @@ func TxDifference(a, b Transactions) Transactions { return keep } -// HashDifference returns a new set which is the difference between a and b. +// HashDifference returns a new set of hashes that are present in a but not in b. 
func HashDifference(a, b []common.Hash) []common.Hash { keep := make([]common.Hash, 0, len(a)) diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 361b977611c2..5dbf367073b5 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -379,7 +379,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } if orig.AccessList() != nil { if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { - return errors.New("access list wrong!") + return errors.New("access list wrong") } } return nil diff --git a/core/vm/common.go b/core/vm/common.go index 90ba4a4ad15b..ba75950e370b 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -63,6 +63,18 @@ func getData(data []byte, start uint64, size uint64) []byte { return common.RightPadBytes(data[start:end], int(size)) } +func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyPadded []byte, actualStart uint64, sizeNonPadded uint64) { + length := uint64(len(data)) + if start > length { + start = length + } + end := start + size + if end > length { + end = length + } + return common.RightPadBytes(data[start:end], int(size)), start, end - start +} + // toWordSize returns the ceiled word size required for memory expansion. 
func toWordSize(size uint64) uint64 { if size > math.MaxUint64-31 { diff --git a/core/vm/contract.go b/core/vm/contract.go index 4e28260a67b7..cfda75b27e11 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -57,6 +57,9 @@ type Contract struct { CodeAddr *common.Address Input []byte + // is the execution frame represented by this object a contract deployment + IsDeployment bool + Gas uint64 value *uint256.Int } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 5f7de8007b51..8e0f8467752e 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -137,6 +137,8 @@ var PrecompiledContractsPrague = map[common.Address]PrecompiledContract{ var PrecompiledContractsBLS = PrecompiledContractsPrague +var PrecompiledContractsVerkle = PrecompiledContractsPrague + var ( PrecompiledAddressesPrague []common.Address PrecompiledAddressesCancun []common.Address @@ -705,6 +707,8 @@ func (c *bls12381G1Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 p0.Add(p0, p1) @@ -734,6 +738,11 @@ func (c *bls12381G1Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG1(input[:128]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[128:]) @@ -787,6 +796,11 @@ func (c *bls12381G1MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. 
+ if !p.IsInSubGroup() { + return nil, errBLS12381G1PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) @@ -827,6 +841,8 @@ func (c *bls12381G2Add) Run(input []byte) ([]byte, error) { return nil, err } + // No need to check the subgroup here, as specified by EIP-2537 + // Compute r = p_0 + p_1 r := new(bls12381.G2Affine) r.Add(p0, p1) @@ -857,6 +873,11 @@ func (c *bls12381G2Mul) Run(input []byte) ([]byte, error) { if p0, err = decodePointG2(input[:256]); err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p0.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } // Decode scalar value e := new(big.Int).SetBytes(input[256:]) @@ -910,6 +931,11 @@ func (c *bls12381G2MultiExp) Run(input []byte) ([]byte, error) { if err != nil { return nil, err } + // 'point is on curve' check already done, + // Here we need to apply subgroup checks. + if !p.IsInSubGroup() { + return nil, errBLS12381G2PointSubgroup + } points[i] = *p // Decode scalar value scalars[i] = *new(fr.Element).SetBytes(input[t1:t2]) @@ -1099,9 +1125,6 @@ func (c *bls12381MapG1) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG1(fe) - if err != nil { - return nil, err - } // Encode the G1 point to 128 bytes return encodePointG1(&r), nil @@ -1135,9 +1158,6 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1}) - if err != nil { - return nil, err - } // Encode the G2 point to 256 bytes return encodePointG2(&r), nil diff --git a/core/vm/eips.go b/core/vm/eips.go index 9f06b2818fee..edd6ec8d0a2c 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -18,9 +18,11 @@ package vm import ( "fmt" + "math" "sort" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) @@ -37,6 +39,7 
@@ var activators = map[int]func(*JumpTable){ 1884: enable1884, 1344: enable1344, 1153: enable1153, + 4762: enable4762, } // EnableEIP enables the given EIP on the config. @@ -319,3 +322,214 @@ func enable6780(jt *JumpTable) { maxStack: maxStack(1, 0), } } + +func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + stack = scope.Stack + a = stack.pop() + memOffset = stack.pop() + codeOffset = stack.pop() + length = stack.pop() + ) + uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() + if overflow { + uint64CodeOffset = math.MaxUint64 + } + addr := common.Address(a.Bytes20()) + code := interpreter.evm.StateDB.GetCode(addr) + contract := &Contract{ + Code: code, + self: AccountRef(addr), + } + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) + + return nil, nil +} + +// opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which +// need not worry about the adjusted bound logic when adding the PUSHDATA to +// the list of access events. +func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + codeLen = uint64(len(scope.Contract.Code)) + integer = new(uint256.Int) + ) + *pc += 1 + if *pc < codeLen { + scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) + + if !scope.Contract.IsDeployment && *pc%31 == 0 { + // touch next chunk if PUSH1 is at the boundary. if so, *pc has + // advanced past this boundary. 
+ contractAddr := scope.Contract.Address() + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + } else { + scope.Stack.push(integer.Clear()) + } + return nil, nil +} + +func makePushEIP4762(size uint64, pushByteSize int) executionFunc { + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + codeLen = len(scope.Contract.Code) + start = min(codeLen, int(*pc+1)) + end = min(codeLen, start+pushByteSize) + ) + scope.Stack.push(new(uint256.Int).SetBytes( + common.RightPadBytes( + scope.Contract.Code[start:end], + pushByteSize, + )), + ) + + if !scope.Contract.IsDeployment { + contractAddr := scope.Contract.Address() + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + + *pc += size + return nil, nil + } +} + +func enable4762(jt *JumpTable) { + jt[SSTORE] = &operation{ + dynamicGas: gasSStore4762, + execute: opSstore, + minStack: minStack(2, 0), + maxStack: maxStack(2, 0), + } + jt[SLOAD] = &operation{ + dynamicGas: gasSLoad4762, + execute: opSload, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[BALANCE] = &operation{ + execute: opBalance, + dynamicGas: gasBalance4762, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODESIZE] = &operation{ + execute: opExtCodeSize, + dynamicGas: gasExtCodeSize4762, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODEHASH] = &operation{ + execute: opExtCodeHash, + dynamicGas: gasExtCodeHash4762, + minStack: 
minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODECOPY] = &operation{ + execute: opExtCodeCopyEIP4762, + dynamicGas: gasExtCodeCopyEIP4762, + minStack: minStack(4, 0), + maxStack: maxStack(4, 0), + memorySize: memoryExtCodeCopy, + } + + jt[CODECOPY] = &operation{ + execute: opCodeCopy, + constantGas: GasFastestStep, + dynamicGas: gasCodeCopyEip4762, + minStack: minStack(3, 0), + maxStack: maxStack(3, 0), + memorySize: memoryCodeCopy, + } + + jt[SELFDESTRUCT] = &operation{ + execute: opSelfdestruct6780, + dynamicGas: gasSelfdestructEIP4762, + constantGas: params.SelfdestructGasEIP150, + minStack: minStack(1, 0), + maxStack: maxStack(1, 0), + } + + jt[CREATE] = &operation{ + execute: opCreate, + constantGas: params.CreateNGasEip4762, + dynamicGas: gasCreateEip3860, + minStack: minStack(3, 1), + maxStack: maxStack(3, 1), + memorySize: memoryCreate, + } + + jt[CREATE2] = &operation{ + execute: opCreate2, + constantGas: params.CreateNGasEip4762, + dynamicGas: gasCreate2Eip3860, + minStack: minStack(4, 1), + maxStack: maxStack(4, 1), + memorySize: memoryCreate2, + } + + jt[CALL] = &operation{ + execute: opCall, + dynamicGas: gasCallEIP4762, + minStack: minStack(7, 1), + maxStack: maxStack(7, 1), + memorySize: memoryCall, + } + + jt[CALLCODE] = &operation{ + execute: opCallCode, + dynamicGas: gasCallCodeEIP4762, + minStack: minStack(7, 1), + maxStack: maxStack(7, 1), + memorySize: memoryCall, + } + + jt[STATICCALL] = &operation{ + execute: opStaticCall, + dynamicGas: gasStaticCallEIP4762, + minStack: minStack(6, 1), + maxStack: maxStack(6, 1), + memorySize: memoryStaticCall, + } + + jt[DELEGATECALL] = &operation{ + execute: opDelegateCall, + dynamicGas: gasDelegateCallEIP4762, + minStack: minStack(6, 1), + maxStack: maxStack(6, 1), + memorySize: memoryDelegateCall, + } + + jt[PUSH1] = &operation{ + execute: opPush1EIP4762, + constantGas: GasFastestStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } + for i := 1; i < 32; i++ { + 
jt[PUSH1+OpCode(i)] = &operation{ + execute: makePushEIP4762(uint64(i+1), i+1), + constantGas: GasFastestStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index c18353a97354..26af0ea041b8 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -22,6 +22,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -42,6 +43,8 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsVerkle: + precompiles = PrecompiledContractsVerkle case evm.chainRules.IsPrague: precompiles = PrecompiledContractsPrague case evm.chainRules.IsCancun: @@ -85,10 +88,11 @@ type BlockContext struct { // All fields can change between transactions. 
type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set) - BlobHashes []common.Hash // Provides information for BLOBHASH - BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set) + BlobHashes []common.Hash // Provides information for BLOBHASH + BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set + AccessEvents *state.AccessEvents // Capture all state accesses for this tx } // EVM is the Ethereum Virtual Machine base object and provides @@ -156,6 +160,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { + if evm.chainRules.IsEIP4762 { + txCtx.AccessEvents = state.NewAccessEvents(statedb.PointCache()) + } evm.TxContext = txCtx evm.StateDB = statedb } @@ -200,6 +207,16 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas p, isPrecompile := evm.precompile(addr) if !evm.StateDB.Exist(addr) { + if !isPrecompile && evm.chainRules.IsEIP4762 { + // add proof of absence to witness + wgas := evm.AccessEvents.AddAccount(addr, false) + if gas < wgas { + evm.StateDB.RevertToSnapshot(snapshot) + return nil, 0, ErrOutOfGas + } + gas -= wgas + } + if !isPrecompile && evm.chainRules.IsEIP158 && value.IsZero() { // Calling a non-existing account, don't do anything. return nil, gas, nil @@ -439,7 +456,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // We add this to the access list _before_ taking a snapshot. 
Even if the // creation fails, the access-list change should not be rolled back. - if evm.chainRules.IsBerlin { + if evm.chainRules.IsEIP2929 { evm.StateDB.AddAddressToAccessList(address) } // Ensure there's no existing contract already at the designated address. @@ -479,8 +496,18 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // The contract is a scoped environment for this execution context only. contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) + contract.IsDeployment = true - ret, err = evm.interpreter.Run(contract, nil, false) + // Charge the contract creation init gas in verkle mode + if evm.chainRules.IsEIP4762 { + if !contract.UseGas(evm.AccessEvents.ContractCreateInitGas(address, value.Sign() != 0), evm.Config.Tracer, tracing.GasChangeWitnessContractInit) { + err = ErrOutOfGas + } + } + + if err == nil { + ret, err = evm.interpreter.Run(contract, nil, false) + } // Check whether the max code size has been exceeded, assign err if the case. if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize { @@ -497,11 +524,24 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // be stored due to not enough gas set an error and let it be handled // by the error checking condition below. 
if err == nil { - createDataGas := uint64(len(ret)) * params.CreateDataGas - if contract.UseGas(createDataGas, evm.Config.Tracer, tracing.GasChangeCallCodeStorage) { - evm.StateDB.SetCode(address, ret) + if !evm.chainRules.IsEIP4762 { + createDataGas := uint64(len(ret)) * params.CreateDataGas + if !contract.UseGas(createDataGas, evm.Config.Tracer, tracing.GasChangeCallCodeStorage) { + err = ErrCodeStoreOutOfGas + } } else { - err = ErrCodeStoreOutOfGas + // Contract creation completed, touch the missing fields in the contract + if !contract.UseGas(evm.AccessEvents.AddAccount(address, true), evm.Config.Tracer, tracing.GasChangeWitnessContractCreation) { + err = ErrCodeStoreOutOfGas + } + + if err == nil && len(ret) > 0 && !contract.UseGas(evm.AccessEvents.CodeChunksRangeGas(address, 0, uint64(len(ret)), uint64(len(ret)), true), evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk) { + err = ErrCodeStoreOutOfGas + } + } + + if err == nil { + evm.StateDB.SetCode(address, ret) } } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index fd5fa14cf5d7..d294324b08c2 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -383,7 +383,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize } else if !evm.StateDB.Exist(address) { gas += params.CallNewAccountGas } - if transfersValue { + if transfersValue && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } memoryGas, err := memoryGasCost(mem, memorySize) @@ -394,7 +394,14 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { return 0, ErrGasUintOverflow } - + if evm.chainRules.IsEIP4762 { + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.AccessEvents.ValueTransferGas(contract.Address(), address)) + if overflow { + return 0, ErrGasUintOverflow + } + } + } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) if err != nil { return 
0, err @@ -402,6 +409,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + return gas, nil } @@ -414,12 +422,22 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory gas uint64 overflow bool ) - if stack.Back(2).Sign() != 0 { + if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsEIP4762 { + address := common.Address(stack.Back(1).Bytes20()) + transfersValue := !stack.Back(2).IsZero() + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.AccessEvents.ValueTransferGas(contract.Address(), address)) + if overflow { + return 0, ErrGasUintOverflow + } + } + } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) if err != nil { return 0, err diff --git a/core/vm/instructions.go b/core/vm/instructions.go index a062bb15ff5c..10cdd72e0c57 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -173,11 +173,7 @@ func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek() - if z.IsZero() { - z.Clear() - } else { - z.AddMod(&x, &y, z) - } + z.AddMod(&x, &y, z) return nil, nil } @@ -363,9 +359,9 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ if overflow { uint64CodeOffset = math.MaxUint64 } + codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) - return nil, nil } @@ -438,6 +434,7 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( num.Clear() return nil, nil } + var upper, 
lower uint64 upper = interpreter.evm.Context.BlockNumber.Uint64() if upper < 257 { @@ -587,6 +584,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } + // reuse size int for stackvalue stackvalue := size @@ -627,6 +625,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation2) @@ -641,7 +640,6 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] stackvalue.SetBytes(addr.Bytes()) } scope.Stack.push(&stackvalue) - scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) if suberr == ErrExecutionReverted { @@ -900,6 +898,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { pushByteSize, )), ) + *pc += size return nil, nil } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 8653864d11e4..e17e913aa3ce 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -643,7 +643,7 @@ func BenchmarkOpKeccak256(bench *testing.B) { } } -func TestCreate2Addreses(t *testing.T) { +func TestCreate2Addresses(t *testing.T) { type testcase struct { origin string salt string diff --git a/core/vm/interface.go b/core/vm/interface.go index 774360a08ef9..8b2c58898ec6 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -75,6 +76,10 @@ type StateDB interface { // AddSlotToAccessList adds the given (address,slot) to the access list. 
This operation is safe to perform // even if the feature/fork is not active yet AddSlotToAccessList(addr common.Address, slot common.Hash) + + // PointCache returns the point cache used in computations + PointCache() *utils.PointCache + Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) RevertToSnapshot(int) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 406927e32158..66a20f434e85 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -99,6 +99,9 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. var table *JumpTable switch { + case evm.chainRules.IsVerkle: + // TODO replace with proper instruction set when fork is specified + table = &verkleInstructionSet case evm.chainRules.IsCancun: table = &cancunInstructionSet case evm.chainRules.IsShanghai: @@ -219,6 +222,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } + + if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment { + // if the PC ends up in a new "chunk" of verkleized code, charge the + // associated costs. + contractAddr := contract.Address() + contract.Gas -= in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false) + } + // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. 
op = contract.GetOp(pc) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 65716f9442af..5624f47ba72c 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -57,6 +57,7 @@ var ( mergeInstructionSet = newMergeInstructionSet() shanghaiInstructionSet = newShanghaiInstructionSet() cancunInstructionSet = newCancunInstructionSet() + verkleInstructionSet = newVerkleInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. @@ -80,6 +81,12 @@ func validate(jt JumpTable) JumpTable { return jt } +func newVerkleInstructionSet() JumpTable { + instructionSet := newCancunInstructionSet() + enable4762(&instructionSet) + return validate(instructionSet) +} + func newCancunInstructionSet() JumpTable { instructionSet := newShanghaiInstructionSet() enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode) diff --git a/core/vm/operations_verkle.go b/core/vm/operations_verkle.go new file mode 100644 index 000000000000..73eb05974dc0 --- /dev/null +++ b/core/vm/operations_verkle.go @@ -0,0 +1,159 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package vm + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/params" +) + +func gasSStore4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.AccessEvents.SlotGas(contract.Address(), stack.peek().Bytes32(), true) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasSLoad4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.AccessEvents.SlotGas(contract.Address(), stack.peek().Bytes32(), false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasBalance4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + gas := evm.AccessEvents.BalanceGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasExtCodeSize4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + gas := evm.AccessEvents.VersionGas(address, false) + gas += evm.AccessEvents.CodeSizeGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasExtCodeHash4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + gas := evm.AccessEvents.CodeHashGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func makeCallVariantGasEIP4762(oldCalculator gasFunc) gasFunc { + return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) 
{ + gas, err := oldCalculator(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + if _, isPrecompile := evm.precompile(contract.Address()); isPrecompile { + return gas, nil + } + witnessGas := evm.AccessEvents.MessageCallGas(contract.Address()) + if witnessGas == 0 { + witnessGas = params.WarmStorageReadCostEIP2929 + } + return witnessGas + gas, nil + } +} + +var ( + gasCallEIP4762 = makeCallVariantGasEIP4762(gasCall) + gasCallCodeEIP4762 = makeCallVariantGasEIP4762(gasCallCode) + gasStaticCallEIP4762 = makeCallVariantGasEIP4762(gasStaticCall) + gasDelegateCallEIP4762 = makeCallVariantGasEIP4762(gasDelegateCall) +) + +func gasSelfdestructEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + beneficiaryAddr := common.Address(stack.peek().Bytes20()) + if _, isPrecompile := evm.precompile(beneficiaryAddr); isPrecompile { + return 0, nil + } + contractAddr := contract.Address() + statelessGas := evm.AccessEvents.VersionGas(contractAddr, false) + statelessGas += evm.AccessEvents.CodeSizeGas(contractAddr, false) + statelessGas += evm.AccessEvents.BalanceGas(contractAddr, false) + if contractAddr != beneficiaryAddr { + statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, false) + } + // Charge write costs if it transfers value + if evm.StateDB.GetBalance(contractAddr).Sign() != 0 { + statelessGas += evm.AccessEvents.BalanceGas(contractAddr, true) + if contractAddr != beneficiaryAddr { + statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, true) + } + } + return statelessGas, nil +} + +func gasCodeCopyEip4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas, err := gasCodeCopy(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + var ( + codeOffset = stack.Back(1) + length = stack.Back(2) + ) + uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() + if overflow { + uint64CodeOffset = math.MaxUint64 
+ } + _, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(contract.Code, uint64CodeOffset, length.Uint64()) + if !contract.IsDeployment { + gas += evm.AccessEvents.CodeChunksRangeGas(contract.Address(), copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), false) + } + return gas, nil +} + +func gasExtCodeCopyEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // memory expansion first (dynamic part of pre-2929 implementation) + gas, err := gasExtCodeCopy(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + addr := common.Address(stack.peek().Bytes20()) + wgas := evm.AccessEvents.VersionGas(addr, false) + wgas += evm.AccessEvents.CodeSizeGas(addr, false) + if wgas == 0 { + wgas = params.WarmStorageReadCostEIP2929 + } + var overflow bool + // We charge (cold-warm), since 'warm' is already charged as constantGas + if gas, overflow = math.SafeAdd(gas, wgas); overflow { + return 0, ErrGasUintOverflow + } + return gas, nil +} diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index b587d6d5a044..1181e5fccdc3 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -57,24 +57,33 @@ type Config struct { // sets defaults on the config func setDefaults(cfg *Config) { if cfg.ChainConfig == nil { + var ( + shanghaiTime = uint64(0) + cancunTime = uint64(0) + ) cfg.ChainConfig = ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: new(big.Int), - DAOForkBlock: new(big.Int), - DAOForkSupport: false, - EIP150Block: new(big.Int), - EIP155Block: new(big.Int), - EIP158Block: new(big.Int), - ByzantiumBlock: new(big.Int), - ConstantinopleBlock: new(big.Int), - PetersburgBlock: new(big.Int), - IstanbulBlock: new(big.Int), - MuirGlacierBlock: new(big.Int), - BerlinBlock: new(big.Int), - LondonBlock: new(big.Int), - } + ChainID: big.NewInt(1), + HomesteadBlock: new(big.Int), + DAOForkBlock: new(big.Int), + DAOForkSupport: false, + EIP150Block: 
new(big.Int), + EIP155Block: new(big.Int), + EIP158Block: new(big.Int), + ByzantiumBlock: new(big.Int), + ConstantinopleBlock: new(big.Int), + PetersburgBlock: new(big.Int), + IstanbulBlock: new(big.Int), + MuirGlacierBlock: new(big.Int), + BerlinBlock: new(big.Int), + LondonBlock: new(big.Int), + ArrowGlacierBlock: nil, + GrayGlacierBlock: nil, + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, + MergeNetsplitBlock: nil, + ShanghaiTime: &shanghaiTime, + CancunTime: &cancunTime} } - if cfg.Difficulty == nil { cfg.Difficulty = new(big.Int) } @@ -101,6 +110,10 @@ func setDefaults(cfg *Config) { if cfg.BlobBaseFee == nil { cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice) } + // Merge indicators + if t := cfg.ChainConfig.ShanghaiTime; cfg.ChainConfig.TerminalTotalDifficultyPassed || (t != nil && *t == 0) { + cfg.Random = &(common.Hash{}) + } } // Execute executes the code using the input as call data during the execution. diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 45228e78c41a..04abc5480eac 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -105,7 +105,7 @@ func TestExecute(t *testing.T) { func TestCall(t *testing.T) { state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - address := common.HexToAddress("0x0a") + address := common.HexToAddress("0xaa") state.SetCode(address, []byte{ byte(vm.PUSH1), 10, byte(vm.PUSH1), 0, @@ -725,7 +725,7 @@ func TestRuntimeJSTracer(t *testing.T) { byte(vm.CREATE), byte(vm.POP), }, - results: []string{`"1,1,952855,6,12"`, `"1,1,952855,6,0"`}, + results: []string{`"1,1,952853,6,12"`, `"1,1,952853,6,0"`}, }, { // CREATE2 @@ -741,7 +741,7 @@ func TestRuntimeJSTracer(t *testing.T) { byte(vm.CREATE2), byte(vm.POP), }, - results: []string{`"1,1,952846,6,13"`, `"1,1,952846,6,0"`}, + results: []string{`"1,1,952844,6,13"`, `"1,1,952844,6,0"`}, }, { // CALL diff --git 
a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go index 9b26ab292859..85ba885d6f5f 100644 --- a/crypto/secp256k1/curve.go +++ b/crypto/secp256k1/curve.go @@ -79,52 +79,52 @@ type BitCurve struct { BitSize int // the size of the underlying field } -func (BitCurve *BitCurve) Params() *elliptic.CurveParams { +func (bitCurve *BitCurve) Params() *elliptic.CurveParams { return &elliptic.CurveParams{ - P: BitCurve.P, - N: BitCurve.N, - B: BitCurve.B, - Gx: BitCurve.Gx, - Gy: BitCurve.Gy, - BitSize: BitCurve.BitSize, + P: bitCurve.P, + N: bitCurve.N, + B: bitCurve.B, + Gx: bitCurve.Gx, + Gy: bitCurve.Gy, + BitSize: bitCurve.BitSize, } } // IsOnCurve returns true if the given (x,y) lies on the BitCurve. -func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { +func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { // y² = x³ + b y2 := new(big.Int).Mul(y, y) //y² - y2.Mod(y2, BitCurve.P) //y²%P + y2.Mod(y2, bitCurve.P) //y²%P x3 := new(big.Int).Mul(x, x) //x² x3.Mul(x3, x) //x³ - x3.Add(x3, BitCurve.B) //x³+B - x3.Mod(x3, BitCurve.P) //(x³+B)%P + x3.Add(x3, bitCurve.B) //x³+B + x3.Mod(x3, bitCurve.P) //(x³+B)%P return x3.Cmp(y2) == 0 } // affineFromJacobian reverses the Jacobian transform. See the comment at the // top of the file. 
-func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { +func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { if z.Sign() == 0 { return new(big.Int), new(big.Int) } - zinv := new(big.Int).ModInverse(z, BitCurve.P) + zinv := new(big.Int).ModInverse(z, bitCurve.P) zinvsq := new(big.Int).Mul(zinv, zinv) xOut = new(big.Int).Mul(x, zinvsq) - xOut.Mod(xOut, BitCurve.P) + xOut.Mod(xOut, bitCurve.P) zinvsq.Mul(zinvsq, zinv) yOut = new(big.Int).Mul(y, zinvsq) - yOut.Mod(yOut, BitCurve.P) + yOut.Mod(yOut, bitCurve.P) return } // Add returns the sum of (x1,y1) and (x2,y2) -func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // If one point is at infinity, return the other point. // Adding the point at infinity to any point will preserve the other point. if x1.Sign() == 0 && y1.Sign() == 0 { @@ -135,27 +135,27 @@ func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { } z := new(big.Int).SetInt64(1) if x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0 { - return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z)) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z)) } - return BitCurve.affineFromJacobian(BitCurve.addJacobian(x1, y1, z, x2, y2, z)) + return bitCurve.affineFromJacobian(bitCurve.addJacobian(x1, y1, z, x2, y2, z)) } // addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and // (x2, y2, z2) and returns their sum, also in Jacobian form. 
-func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { +func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl z1z1 := new(big.Int).Mul(z1, z1) - z1z1.Mod(z1z1, BitCurve.P) + z1z1.Mod(z1z1, bitCurve.P) z2z2 := new(big.Int).Mul(z2, z2) - z2z2.Mod(z2z2, BitCurve.P) + z2z2.Mod(z2z2, bitCurve.P) u1 := new(big.Int).Mul(x1, z2z2) - u1.Mod(u1, BitCurve.P) + u1.Mod(u1, bitCurve.P) u2 := new(big.Int).Mul(x2, z1z1) - u2.Mod(u2, BitCurve.P) + u2.Mod(u2, bitCurve.P) h := new(big.Int).Sub(u2, u1) if h.Sign() == -1 { - h.Add(h, BitCurve.P) + h.Add(h, bitCurve.P) } i := new(big.Int).Lsh(h, 1) i.Mul(i, i) @@ -163,13 +163,13 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int s1 := new(big.Int).Mul(y1, z2) s1.Mul(s1, z2z2) - s1.Mod(s1, BitCurve.P) + s1.Mod(s1, bitCurve.P) s2 := new(big.Int).Mul(y2, z1) s2.Mul(s2, z1z1) - s2.Mod(s2, BitCurve.P) + s2.Mod(s2, bitCurve.P) r := new(big.Int).Sub(s2, s1) if r.Sign() == -1 { - r.Add(r, BitCurve.P) + r.Add(r, bitCurve.P) } r.Lsh(r, 1) v := new(big.Int).Mul(u1, i) @@ -179,7 +179,7 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int x3.Sub(x3, j) x3.Sub(x3, v) x3.Sub(x3, v) - x3.Mod(x3, BitCurve.P) + x3.Mod(x3, bitCurve.P) y3 := new(big.Int).Set(r) v.Sub(v, x3) @@ -187,33 +187,33 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int s1.Mul(s1, j) s1.Lsh(s1, 1) y3.Sub(y3, s1) - y3.Mod(y3, BitCurve.P) + y3.Mod(y3, bitCurve.P) z3 := new(big.Int).Add(z1, z2) z3.Mul(z3, z3) z3.Sub(z3, z1z1) if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) + z3.Add(z3, bitCurve.P) } z3.Sub(z3, z2z2) if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) + z3.Add(z3, bitCurve.P) } z3.Mul(z3, h) - z3.Mod(z3, BitCurve.P) + z3.Mod(z3, bitCurve.P) return x3, y3, z3 } // Double returns 2*(x,y) -func 
(BitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { z1 := new(big.Int).SetInt64(1) - return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z1)) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) } // doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and // returns its double, also in Jacobian form. -func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l a := new(big.Int).Mul(x, x) //X1² @@ -231,30 +231,30 @@ func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D x3.Sub(f, x3) //F-2*D - x3.Mod(x3, BitCurve.P) + x3.Mod(x3, bitCurve.P) y3 := new(big.Int).Sub(d, x3) //D-X3 y3.Mul(e, y3) //E*(D-X3) y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C - y3.Mod(y3, BitCurve.P) + y3.Mod(y3, bitCurve.P) z3 := new(big.Int).Mul(y, z) //Y1*Z1 z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 - z3.Mod(z3, BitCurve.P) + z3.Mod(z3, bitCurve.P) return x3, y3, z3 } // ScalarBaseMult returns k*G, where G is the base point of the group and k is // an integer in big-endian form. -func (BitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { - return BitCurve.ScalarMult(BitCurve.Gx, BitCurve.Gy, k) +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) } // Marshal converts a point into the form specified in section 4.3.6 of ANSI // X9.62. 
-func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte { - byteLen := (BitCurve.BitSize + 7) >> 3 +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 ret := make([]byte, 1+2*byteLen) ret[0] = 4 // uncompressed point flag readBits(x, ret[1:1+byteLen]) @@ -264,8 +264,8 @@ func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte { // Unmarshal converts a point, serialised by Marshal, into an x, y pair. On // error, x = nil. -func (BitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { - byteLen := (BitCurve.BitSize + 7) >> 3 +func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 if len(data) != 1+2*byteLen { return } diff --git a/crypto/secp256k1/scalar_mult_cgo.go b/crypto/secp256k1/scalar_mult_cgo.go index bdf8eeede7df..d11c11faf85b 100644 --- a/crypto/secp256k1/scalar_mult_cgo.go +++ b/crypto/secp256k1/scalar_mult_cgo.go @@ -21,7 +21,7 @@ extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned */ import "C" -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { // Ensure scalar is exactly 32 bytes. We pad always, even if // scalar is 32 bytes long, to avoid a timing side channel. 
if len(scalar) > 32 { diff --git a/crypto/secp256k1/scalar_mult_nocgo.go b/crypto/secp256k1/scalar_mult_nocgo.go index 22f53ac6ae65..feb13a8dfd0e 100644 --- a/crypto/secp256k1/scalar_mult_nocgo.go +++ b/crypto/secp256k1/scalar_mult_nocgo.go @@ -9,6 +9,6 @@ package secp256k1 import "math/big" -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { panic("ScalarMult is not available when secp256k1 is built without cgo") } diff --git a/eth/backend.go b/eth/backend.go index 0c2f829e53a8..3a1c4eb923f4 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -19,7 +19,6 @@ package eth import ( "encoding/json" - "errors" "fmt" "github.com/ethereum/go-ethereum/core/txpool/rip7560pool" "math/big" @@ -106,9 +105,6 @@ type Ethereum struct { // whose lifecycle will be managed by the provided node. func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Ensure configuration values are compatible and sane - if config.SyncMode == downloader.LightSync { - return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated") - } if !config.SyncMode.IsValid() { return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode) } @@ -209,7 +205,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } t, err := tracers.LiveDirectory.New(config.VMTrace, traceConfig) if err != nil { - return nil, fmt.Errorf("Failed to create tracer %s: %v", config.VMTrace, err) + return nil, fmt.Errorf("failed to create tracer %s: %v", config.VMTrace, err) } vmConfig.Tracer = t } diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index b8645d6be49b..64e6684be155 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -779,7 +779,7 @@ func setBlockhash(data *engine.ExecutableData) *engine.ExecutableData { Extra: data.ExtraData, MixDigest: data.Random, } - block := 
types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() return data } @@ -935,7 +935,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) { Extra: data.ExtraData, MixDigest: data.Random, } - block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs}) data.BlockHash = block.Hash() // Send the new payload resp2, err := api.NewPayloadV1(data) @@ -979,11 +979,11 @@ func TestSimultaneousNewBlock(t *testing.T) { defer wg.Done() if newResp, err := api.NewPayloadV1(*execData); err != nil { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %w", err) + testErr = fmt.Errorf("failed to insert block: %w", err) errMu.Unlock() } else if newResp.Status != "VALID" { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %v", newResp.Status) + testErr = fmt.Errorf("failed to insert block: %v", newResp.Status) errMu.Unlock() } }() @@ -1018,7 +1018,7 @@ func TestSimultaneousNewBlock(t *testing.T) { defer wg.Done() if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %w", err) + testErr = fmt.Errorf("failed to insert block: %w", err) errMu.Unlock() } }() @@ -1554,7 +1554,7 @@ func TestBlockToPayloadWithBlobs(t *testing.T) { }, } - block := types.NewBlock(&header, txs, nil, nil, trie.NewStackTrie(nil)) + block := types.NewBlock(&header, &types.Body{Transactions: txs}, nil, trie.NewStackTrie(nil)) envelope := engine.BlockToExecutableData(block, nil, sidecars) var want int for _, tx := range txs { diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index fecd83f2762c..2d6569e42218 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -279,9 +279,12 @@ func (c *SimulatedBeacon) Rollback() { // Fork sets the head to 
the provided hash. func (c *SimulatedBeacon) Fork(parentHash common.Hash) error { + // Ensure no pending transactions. + c.eth.TxPool().Sync() if len(c.eth.TxPool().Pending(txpool.PendingFilter{})) != 0 { return errors.New("pending block dirty") } + parent := c.eth.BlockChain().GetBlockByHash(parentHash) if parent == nil { return errors.New("parent not found") diff --git a/eth/downloader/api.go b/eth/downloader/api.go index 90c36afbb5ba..ac175672a0ce 100644 --- a/eth/downloader/api.go +++ b/eth/downloader/api.go @@ -129,7 +129,7 @@ func (api *DownloaderAPI) eventLoop() { } } -// Syncing provides information when this nodes starts synchronising with the Ethereum network and when it's finished. +// Syncing provides information when this node starts synchronising with the Ethereum network and when it's finished. func (api *DownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 7dfc419f4e9c..57c6eee40a0b 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -106,7 +106,7 @@ func (b *beaconBackfiller) resume() { }() // If the downloader fails, report an error as in beacon chain mode there // should be no errors as long as the chain we're syncing to is valid. 
- if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil { + if err := b.downloader.synchronise(mode, b.started); err != nil { log.Error("Beacon backfilling failed", "err", err) return } @@ -202,7 +202,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: chainHead = d.blockchain.CurrentSnapBlock() default: - chainHead = d.lightchain.CurrentHeader() + panic("unknown sync mode") } number := chainHead.Number.Uint64() @@ -222,7 +222,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) default: - linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) + panic("unknown sync mode") } if !linked { // This is a programming error. The chain backfiller was called with a @@ -257,7 +257,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: known = d.blockchain.HasFastBlock(h.Hash(), n) default: - known = d.lightchain.HasHeader(h.Hash(), n) + panic("unknown sync mode") } if !known { end = check @@ -268,9 +268,9 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { return start, nil } -// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling +// fetchHeaders feeds skeleton headers to the downloader queue for scheduling // until sync errors or is finished. 
-func (d *Downloader) fetchBeaconHeaders(from uint64) error { +func (d *Downloader) fetchHeaders(from uint64) error { var head *types.Header _, tail, _, err := d.skeleton.Bounds() if err != nil { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 941f575aa898..d147414859f2 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -41,17 +41,14 @@ import ( var ( MaxBlockFetch = 128 // Number of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Number of block headers to be fetched per retrieval request - MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 2048 // Number of content download results to import at once into the chain - fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) + maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxHeadersProcess = 2048 // Number of header download results to import at once into the chain + maxResultsProcess = 2048 // Number of content download results to import at once into the chain + fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection - reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs + reorgProtHeaderDelay = 2 // Number of headers 
to delay delivering to cover mini reorgs fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download @@ -59,26 +56,17 @@ var ( ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errUnsyncedPeer = errors.New("unsynced peer") - errNoPeers = errors.New("no peers to keep download active") + errBusy = errors.New("busy") + errBadPeer = errors.New("action from bad peer ignored") + errTimeout = errors.New("timeout") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") errInvalidChain = errors.New("retrieved hash chain is invalid") errInvalidBody = errors.New("retrieved block body is invalid") errInvalidReceipt = errors.New("retrieved receipt is invalid") errCancelStateFetch = errors.New("state data download canceled (requested)") errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") - errTooOld = errors.New("peer's protocol version too old") - errNoAncestorFound = errors.New("no common ancestor found") errNoPivotHeader = errors.New("pivot header is not found") - ErrMergeTransition = errors.New("legacy sync reached the merge") ) // peerDropFn is a callback type for dropping a peer detected as malicious. @@ -99,9 +87,8 @@ type Downloader struct { mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events - genesis uint64 // Genesis block number to limit sync to (e.g. 
light client CHT) - queue *queue // Scheduler for selecting the hashes to download - peers *peerSet // Set of active peers from which download can proceed + queue *queue // Scheduler for selecting the hashes to download + peers *peerSet // Set of active peers from which download can proceed stateDB ethdb.Database // Database to state sync into (and deduplicate via) @@ -110,7 +97,6 @@ type Downloader struct { syncStatsChainHeight uint64 // Highest block number known when syncing started syncStatsLock sync.RWMutex // Lock protecting the sync stats fields - lightchain LightChain blockchain BlockChain // Callbacks @@ -118,11 +104,10 @@ type Downloader struct { badBlock badBlockFn // Reports a block as rejected by the chain // Status - synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising atomic.Bool - notified atomic.Bool - committed atomic.Bool - ancientLimit uint64 // The maximum block number which can be regarded as ancient data. + synchronising atomic.Bool + notified atomic.Bool + committed atomic.Bool + ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels headerProcCh chan *headerTask // Channel to feed the header processor new tasks @@ -138,7 +123,6 @@ type Downloader struct { stateSyncStart chan *stateSync // Cancellation and termination - cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 
@@ -147,7 +131,6 @@ type Downloader struct { quitLock sync.Mutex // Lock to prevent double closes // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) @@ -158,8 +141,8 @@ type Downloader struct { syncLogTime time.Time // Time instance when status was last reported } -// LightChain encapsulates functions required to synchronise a light chain. -type LightChain interface { +// BlockChain encapsulates functions required to sync a (full or snap) blockchain. +type BlockChain interface { // HasHeader verifies a header's presence in the local chain. HasHeader(common.Hash, uint64) bool @@ -177,11 +160,6 @@ type LightChain interface { // SetHead rewinds the local chain to a new head. SetHead(uint64) error -} - -// BlockChain encapsulates functions required to sync a (full or snap) blockchain. -type BlockChain interface { - LightChain // HasBlock verifies a block's presence in the local chain. HasBlock(common.Hash, uint64) bool @@ -216,17 +194,13 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { - if lightchain == nil { - lightchain = chain - } +func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader { dl := &Downloader{ stateDB: stateDb, mux: mux, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), peers: newPeerSet(), blockchain: chain, - lightchain: lightchain, dropPeer: dropPeer, headerProcCh: make(chan *headerTask, 1), quitCh: make(chan struct{}), @@ -255,15 +229,13 @@ func (d *Downloader) Progress() ethereum.SyncProgress { current := uint64(0) mode := d.getMode() - switch { - case d.blockchain != nil && mode == FullSync: + switch mode { + case FullSync: current = d.blockchain.CurrentBlock().Number.Uint64() - case d.blockchain != nil && mode == SnapSync: + case SnapSync: current = d.blockchain.CurrentSnapBlock().Number.Uint64() - case d.lightchain != nil: - current = d.lightchain.CurrentHeader().Number.Uint64() default: - log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) + log.Error("Unknown downloader mode", "mode", mode) } progress, pending := d.SnapSyncer.Progress() @@ -326,39 +298,10 @@ func (d *Downloader) UnregisterPeer(id string) error { return nil } -// LegacySync tries to sync up our local block chain with a remote peer, both -// adding various sanity checks as well as wrapping it with various log entries. 
-func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { - err := d.synchronise(id, head, td, ttd, mode, false, nil) - - switch err { - case nil, errBusy, errCanceled: - return err - } - if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || - errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || - errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { - log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored - log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) - } else { - d.dropPeer(id) - } - return err - } - if errors.Is(err, ErrMergeTransition) { - return err // This is an expected fault, don't keep printing it in a spin-loop - } - log.Warn("Synchronisation failed, retrying", "err", err) - return err -} - // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { +func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error { // The beacon header syncer is async. It will start this synchronization and // will continue doing other tasks. 
However, if synchronization needs to be // cancelled, the syncer needs to know if we reached the startup point (and @@ -373,10 +316,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, } }() } - // Mock out the synchronisation if testing - if d.synchroniseMock != nil { - return d.synchroniseMock(id, hash) - } // Make sure only one goroutine is ever allowed past this point at once if !d.synchronising.CompareAndSwap(false, true) { return errBusy @@ -424,7 +363,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Create cancel channel for aborting mid-flight and mark the master peer d.cancelLock.Lock() d.cancelCh = make(chan struct{}) - d.cancelPeer = id d.cancelLock.Unlock() defer d.Cancel() // No matter what, we can't leave the cancel channel open @@ -432,85 +370,64 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // Atomically set the requested sync mode d.mode.Store(uint32(mode)) - // Retrieve the origin peer and initiate the downloading process - var p *peerConnection - if !beaconMode { // Beacon mode doesn't need a peer to sync from - p = d.peers.Peer(id) - if p == nil { - return errUnknownPeer - } - } if beaconPing != nil { close(beaconPing) } - return d.syncWithPeer(p, hash, td, ttd, beaconMode) + return d.syncToHead() } func (d *Downloader) getMode() SyncMode { return SyncMode(d.mode.Load()) } -// syncWithPeer starts a block synchronization based on the hash chain from the -// specified peer and head hash. -func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { +// syncToHead starts a block synchronization based on the hash chain from +// the specified head hash. 
+func (d *Downloader) syncToHead() (err error) { d.mux.Post(StartEvent{}) defer func() { // reset on error if err != nil { d.mux.Post(FailedEvent{err}) } else { - latest := d.lightchain.CurrentHeader() + latest := d.blockchain.CurrentHeader() d.mux.Post(DoneEvent{latest}) } }() mode := d.getMode() - if !beaconMode { - log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) - } else { - log.Debug("Backfilling with the network", "mode", mode) - } + log.Debug("Backfilling with the network", "mode", mode) defer func(start time.Time) { log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) }(time.Now()) // Look up the sync boundaries: the common ancestor and the target block var latest, pivot, final *types.Header - if !beaconMode { - // In legacy mode, use the master peer to retrieve the headers from - latest, pivot, err = d.fetchHead(p) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain to retrieve the headers from - latest, _, final, err = d.skeleton.Bounds() - if err != nil { - return err - } - if latest.Number.Uint64() > uint64(fsMinFullBlocks) { - number := latest.Number.Uint64() - uint64(fsMinFullBlocks) - - // Retrieve the pivot header from the skeleton chain segment but - // fallback to local chain if it's not found in skeleton space. 
- if pivot = d.skeleton.Header(number); pivot == nil { - _, oldest, _, _ := d.skeleton.Bounds() // error is already checked - if number < oldest.Number.Uint64() { - count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks - headers := d.readHeaderRange(oldest, count) - if len(headers) == count { - pivot = headers[len(headers)-1] - log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) - } + latest, _, final, err = d.skeleton.Bounds() + if err != nil { + return err + } + if latest.Number.Uint64() > uint64(fsMinFullBlocks) { + number := latest.Number.Uint64() - uint64(fsMinFullBlocks) + + // Retrieve the pivot header from the skeleton chain segment but + // fallback to local chain if it's not found in skeleton space. + if pivot = d.skeleton.Header(number); pivot == nil { + _, oldest, _, _ := d.skeleton.Bounds() // error is already checked + if number < oldest.Number.Uint64() { + count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks + headers := d.readHeaderRange(oldest, count) + if len(headers) == count { + pivot = headers[len(headers)-1] + log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) } } - // Print an error log and return directly in case the pivot header - // is still not found. It means the skeleton chain is not linked - // correctly with local chain. - if pivot == nil { - log.Error("Pivot header is not found", "number", number) - return errNoPivotHeader - } + } + // Print an error log and return directly in case the pivot header + // is still not found. It means the skeleton chain is not linked + // correctly with local chain. 
+ if pivot == nil { + log.Error("Pivot header is not found", "number", number) + return errNoPivotHeader } } // If no pivot block was returned, the head is below the min full block @@ -522,19 +439,10 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } height := latest.Number.Uint64() - var origin uint64 - if !beaconMode { - // In legacy mode, reach out to the network and find the ancestor - origin, err = d.findAncestor(p, latest) - if err != nil { - return err - } - } else { - // In beacon mode, use the skeleton chain for the ancestor lookup - origin, err = d.findBeaconAncestor() - if err != nil { - return err - } + // In beacon mode, use the skeleton chain for the ancestor lookup + origin, err := d.findBeaconAncestor() + if err != nil { + return err } d.syncStatsLock.Lock() if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { @@ -577,24 +485,15 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * // the ancientLimit through that. Otherwise calculate the ancient limit through // the advertised height of the remote peer. This most is mostly a fallback for // legacy networks, but should eventually be dropped. TODO(karalabe). - if beaconMode { - // Beacon sync, use the latest finalized block as the ancient limit - // or a reasonable height if no finalized block is yet announced. - if final != nil { - d.ancientLimit = final.Number.Uint64() - } else if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + // + // Beacon sync, use the latest finalized block as the ancient limit + // or a reasonable height if no finalized block is yet announced. + if final != nil { + d.ancientLimit = final.Number.Uint64() + } else if height > fullMaxForkAncestry+1 { + d.ancientLimit = height - fullMaxForkAncestry - 1 } else { - // Legacy sync, use the best announcement we have from the remote peer. - // TODO(karalabe): Drop this pathway. 
- if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } + d.ancientLimit = 0 } frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. @@ -608,7 +507,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } // Rewind the ancient store and blockchain if reorg happens. if origin+1 < frozen { - if err := d.lightchain.SetHead(origin); err != nil { + if err := d.blockchain.SetHead(origin); err != nil { return err } log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin) @@ -616,22 +515,13 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } // Initiate the sync using a concurrent header and content retrieval algorithm d.queue.Prepare(origin+1, mode) - if d.syncInitHook != nil { - d.syncInitHook(origin, height) - } - var headerFetcher func() error - if !beaconMode { - // In legacy mode, headers are retrieved from the network - headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } - } else { - // In beacon mode, headers are served by the skeleton syncer - headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } - } + + // In beacon mode, headers are served by the skeleton syncer fetchers := []func() error{ - headerFetcher, // Headers are always retrieved - func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync - func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync - func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, + func() error { return d.fetchHeaders(origin + 1) }, // Headers are always retrieved + func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync + func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved 
during snap sync + func() error { return d.processHeaders(origin + 1) }, } if mode == SnapSync { d.pivotLock.Lock() @@ -640,7 +530,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) } else if mode == FullSync { - fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) + fetchers = append(fetchers, func() error { return d.processFullSyncContent() }) } return d.spawnSync(fetchers) } @@ -719,540 +609,12 @@ func (d *Downloader) Terminate() { d.Cancel() } -// fetchHead retrieves the head header and prior pivot block (if available) from -// a remote peer. -func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { - p.log.Debug("Retrieving remote chain head") - mode := d.getMode() - - // Request the advertised remote head block and wait for the response - latest, _ := p.peer.Head() - fetch := 1 - if mode == SnapSync { - fetch = 2 // head + pivot headers - } - headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) - if err != nil { - return nil, nil, err - } - // Make sure the peer gave us at least one and at most the requested headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the request. If - // only 1 header was returned, make sure there's no pivot or there was not - // one requested. 
- head = headers[0] - if len(headers) == 1 { - if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. Check the pivot number and return, - pivot = headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil -} - -// calculateRequestSpan calculates what headers to request from a peer when trying to determine the -// common ancestor. -// It returns parameters to be used for peer.RequestHeadersByNumber: -// -// from - starting block number -// count - number of headers to request -// skip - number of headers to skip -// -// and also returns 'max', the last block which is expected to be returned by the remote peers, -// given the (from,count,skip) -func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { - var ( - from int - count int - MaxCount = MaxHeaderFetch / 16 - ) - // requestHead is the highest block that we will ask for. 
If requestHead is not offset, - // the highest block that we will get is 16 blocks back from head, which means we - // will fetch 14 or 15 blocks unnecessarily in the case the height difference - // between us and the peer is 1-2 blocks, which is most common - requestHead := int(remoteHeight) - 1 - if requestHead < 0 { - requestHead = 0 - } - // requestBottom is the lowest block we want included in the query - // Ideally, we want to include the one just below our own head - requestBottom := int(localHeight - 1) - if requestBottom < 0 { - requestBottom = 0 - } - totalSpan := requestHead - requestBottom - span := 1 + totalSpan/MaxCount - if span < 2 { - span = 2 - } - if span > 16 { - span = 16 - } - - count = 1 + totalSpan/span - if count > MaxCount { - count = MaxCount - } - if count < 2 { - count = 2 - } - from = requestHead - (count-1)*span - if from < 0 { - from = 0 - } - max := from + (count-1)*span - return int64(from), count, span - 1, uint64(max) -} - -// findAncestor tries to locate the common ancestor link of the local chain and -// a remote peers blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N links should already get us a match. -// In the rare scenario when we ended up on a long reorganisation (i.e. none of -// the head links match), we do a binary search to find the common ancestor. 
-func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { - // Figure out the valid ancestor range to prevent rewrite attacks - var ( - floor = int64(-1) - localHeight uint64 - remoteHeight = remoteHeader.Number.Uint64() - ) - mode := d.getMode() - switch mode { - case FullSync: - localHeight = d.blockchain.CurrentBlock().Number.Uint64() - case SnapSync: - localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64() - default: - localHeight = d.lightchain.CurrentHeader().Number.Uint64() - } - p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) - - // Recap floor value for binary search - maxForkAncestry := fullMaxForkAncestry - if d.getMode() == LightSync { - maxForkAncestry = lightMaxForkAncestry - } - if localHeight >= maxForkAncestry { - // We're above the max reorg threshold, find the earliest fork point - floor = int64(localHeight - maxForkAncestry) - } - // If we're doing a light sync, ensure the floor doesn't go below the CHT, as - // all headers before that point will be missing. - if mode == LightSync { - // If we don't know the current CHT position, find it - if d.genesis == 0 { - header := d.lightchain.CurrentHeader() - for header != nil { - d.genesis = header.Number.Uint64() - if floor >= int64(d.genesis)-1 { - break - } - header = d.lightchain.GetHeaderByHash(header.ParentHash) - } - } - // We already know the "genesis" block number, cap floor to that - if floor < int64(d.genesis)-1 { - floor = int64(d.genesis) - 1 - } - } - - ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) - if err == nil { - return ancestor, nil - } - // The returned error was not nil. - // If the error returned does not reflect that a common ancestor was not found, return it. - // If the error reflects that a common ancestor was not found, continue to binary search, - // where the error value will be reassigned. 
- if !errors.Is(err, errNoAncestorFound) { - return 0, err - } - - ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) - if err != nil { - return 0, err - } - return ancestor, nil -} - -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { - from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) - - p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) - if err != nil { - return 0, err - } - // Wait for the remote response to the head fetch - number, hash := uint64(0), common.Hash{} - - // Make sure the peer actually gave something valid - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := hashes[i] - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - // If the head fetch already found an ancestor, return - if hash != 
(common.Hash{}) { - if int64(number) <= floor { - p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", number, "hash", hash) - return number, nil - } - return 0, errNoAncestorFound -} - -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { - hash := common.Hash{} - - // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), remoteHeight - if floor > 0 { - start = uint64(floor) - } - p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) - - for start+1 < end { - // Split our chain interval in two, and request the hash to cross check - check := (start + end) / 2 - - headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) - if err != nil { - return 0, err - } - // Make sure the peer actually gave something valid - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - // Modify the search interval based on the response - h := hashes[0] - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case SnapSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - continue - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - } - // Ensure valid ancestry and return - if int64(start) <= floor { - p.log.Warn("Ancestor 
below allowance", "number", start, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", start, "hash", hash) - return start, nil -} - -// fetchHeaders keeps retrieving headers concurrently from the number -// requested, until no more are returned, potentially throttling on the way. To -// facilitate concurrency but still protect against malicious nodes sending bad -// headers, we construct a header chain skeleton using the "origin" peer we are -// syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If no one -// can fill in the skeleton - not even the origin peer - it's assumed invalid and -// the origin is dropped. -func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { - p.log.Debug("Directing header downloads", "origin", from) - defer p.log.Debug("Header download terminated") - - // Start pulling the header chain skeleton until all is done - var ( - skeleton = true // Skeleton assembly phase or finishing up - pivoting = false // Whether the next request is pivot verification - ancestor = from - mode = d.getMode() - ) - for { - // Pull the next batch of headers, it either: - // - Pivot check to see if the chain moved too far - // - Skeleton retrieval to permit concurrent header fetches - // - Full header retrieval if we're near the chain head - var ( - headers []*types.Header - hashes []common.Hash - err error - ) - switch { - case pivoting: - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - - case skeleton: - p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, 
err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - - default: - p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) - } - switch err { - case nil: - // Headers retrieved, continue with processing - - case errCanceled: - // Sync cancelled, no issue, propagate up - return err - - default: - // Header retrieval either timed out, or the peer failed in some strange way - // (e.g. disconnect). Consider the master peer bad and drop - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - select { - case d.headerProcCh <- nil: - case <-d.cancelCh: - } - return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) - } - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - - d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() - } - d.pivotLock.RUnlock() - - if pivoting { - if len(headers) == 2 { - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() - - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() - - // Write out the 
pivot into the database so a rollback beyond - // it will reenable snap sync and update the state root that - // the state syncer will be downloading. - rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - // Disable the pivot check and fetch the next batch of headers - pivoting = false - continue - } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && len(headers) == 0 { - // A malicious node might withhold advertised headers indefinitely - if from+uint64(MaxHeaderFetch)-1 <= head { - p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) - return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) - } - p.log.Debug("No skeleton, fetching headers directly") - skeleton = false - continue - } - // If no more headers are inbound, notify the content fetchers and return - if len(headers) == 0 { - // Don't abort header fetches while the pivot is downloading - if !d.committed.Load() && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in snap sync) and no more headers, terminate the process - p.log.Debug("No more headers available") - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCanceled - } - } - // If we received a skeleton batch, resolve internals concurrently - var progressed bool - if skeleton { - filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - hashes = hashset[proced:] - - progressed = proced > 0 - from += uint64(proced) - } else { - // A malicious node might withhold advertised headers indefinitely - if n := len(headers); 
n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { - p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) - return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) - } - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. - if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentSnapBlock().Number.Uint64() - if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise, we might end up delaying header deliveries pointlessly. - if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - hashes = hashes[:n-delay] - } - } - } - // If no headers have been delivered, or all of them have been delayed, - // sleep a bit and retry. 
Take care with headers already consumed during - // skeleton filling - if len(headers) == 0 && !progressed { - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - continue - case <-d.cancelCh: - return errCanceled - } - } - // Insert any remaining new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- &headerTask{ - headers: headers, - hashes: hashes, - }: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) - } - // If we're still skeleton filling snap sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - pivoting = true - } - } -} - -// fillHeaderSkeleton concurrently retrieves headers from all our available peers -// and maps them to the provided skeleton header chain. -// -// Any partial results from the beginning of the skeleton is (if possible) forwarded -// immediately to the header processor to keep the rest of the pipeline full even -// in the case of header stalls. -// -// The method returns the entire filled skeleton and also the number of headers -// already forwarded for processing. 
-func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { - log.Debug("Filling up skeleton", "from", from) - d.queue.ScheduleSkeleton(from, skeleton) - - err := d.concurrentFetch((*headerQueue)(d), false) - if err != nil { - log.Debug("Skeleton fill failed", "err", err) - } - filled, hashes, proced := d.queue.RetrieveHeaders() - if err == nil { - log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) - } - return filled, hashes, proced, err -} - // fetchBodies iteratively downloads the scheduled block bodies, taking any // available peers, reserving a chunk of blocks for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { +func (d *Downloader) fetchBodies(from uint64) error { log.Debug("Downloading block bodies", "origin", from) - err := d.concurrentFetch((*bodyQueue)(d), beaconMode) + err := d.concurrentFetch((*bodyQueue)(d)) log.Debug("Block body download terminated", "err", err) return err @@ -1261,9 +623,9 @@ func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { // fetchReceipts iteratively downloads the scheduled block receipts, taking any // available peers, reserving a chunk of receipts for each, waiting for delivery // and also periodically checking for timeouts. 
-func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { +func (d *Downloader) fetchReceipts(from uint64) error { log.Debug("Downloading receipts", "origin", from) - err := d.concurrentFetch((*receiptQueue)(d), beaconMode) + err := d.concurrentFetch((*receiptQueue)(d)) log.Debug("Receipt download terminated", "err", err) return err @@ -1272,11 +634,10 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { // processHeaders takes batches of retrieved headers from an input channel and // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. -func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processHeaders(origin uint64) error { var ( - mode = d.getMode() - gotHeaders = false // Wait for batches of headers to process - timer = time.NewTimer(time.Second) + mode = d.getMode() + timer = time.NewTimer(time.Second) ) defer timer.Stop() @@ -1295,48 +656,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode case <-d.cancelCh: } } - // If we're in legacy sync mode, we need to check total difficulty - // violations from malicious peers. That is not needed in beacon - // mode and we can skip to terminating sync. - if !beaconMode { - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. 
fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if mode != LightSync { - head := d.blockchain.CurrentBlock() - if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - // If snap or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if mode == SnapSync || mode == LightSync { - head := d.lightchain.CurrentHeader() - if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - } return nil } // Otherwise split the chunk of headers into batches and process them headers, hashes := task.headers, task.hashes - gotHeaders = true for len(headers) > 0 { // Terminate if something failed in between processing chunks select { @@ -1353,66 +677,32 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode chunkHashes := hashes[:limit] // In case of header only syncing, validate the chunk immediately - if mode == SnapSync || mode == LightSync { + if mode == SnapSync { // Although the received headers might be all valid, a legacy // PoW/PoA sync must not accept post-merge headers. Make sure // that any transition is rejected at this point. 
- var ( - rejected []*types.Header - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, header := range chunkHeaders { - td = new(big.Int).Add(td, header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last header in - if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { - chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Difficulty) - } - } else { - chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] - } - break - } - } - } if len(chunkHeaders) > 0 { - if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { + if n, err := d.blockchain.InsertHeaderChain(chunkHeaders); err != nil { log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } - // Unless we're doing light chains, schedule the headers for associated content retrieval - if mode == FullSync || mode == SnapSync { - // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { - timer.Reset(time.Second) - select { - case <-d.cancelCh: - return errCanceled - case <-timer.C: - } - } - // Otherwise 
insert the headers for content retrieval - inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) - if len(inserts) != len(chunkHeaders) { - return fmt.Errorf("%w: stale headers", errBadPeer) + // If we've reached the allowed number of pending headers, stall a bit + for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { + timer.Reset(time.Second) + select { + case <-d.cancelCh: + return errCanceled + case <-timer.C: } } + // Otherwise insert the headers for content retrieval + inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) + if len(inserts) != len(chunkHeaders) { + return fmt.Errorf("%w: stale headers", errBadPeer) + } + headers = headers[limit:] hashes = hashes[limit:] origin += uint64(limit) @@ -1436,7 +726,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } // processFullSyncContent takes fetch results from the queue and imports them into the chain. -func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { +func (d *Downloader) processFullSyncContent() error { for { results := d.queue.Results(true) if len(results) == 0 { @@ -1445,44 +735,9 @@ func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error if d.chainInsertHook != nil { d.chainInsertHook(results) } - // Although the received blocks might be all valid, a legacy PoW/PoA sync - // must not accept post-merge blocks. Make sure that pre-merge blocks are - // imported, but post-merge ones are rejected. 
- var ( - rejected []*fetchResult - td *big.Int - ) - if !beaconMode && ttd != nil { - td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1) - if td == nil { - // This should never really happen, but handle gracefully for now - log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash) - return fmt.Errorf("%w: parent TD missing", errInvalidChain) - } - for i, result := range results { - td = new(big.Int).Add(td, result.Header.Difficulty) - if td.Cmp(ttd) >= 0 { - // Terminal total difficulty reached, allow the last block in - if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 { - results, rejected = results[:i+1], results[i+1:] - if len(rejected) > 0 { - // Make a nicer user log as to the first TD truly rejected - td = new(big.Int).Add(td, rejected[0].Header.Difficulty) - } - } else { - results, rejected = results[:i], results[i:] - } - break - } - } - } if err := d.importBlockResults(results); err != nil { return err } - if len(rejected) != 0 { - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd) - return ErrMergeTransition - } } } @@ -1504,7 +759,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { ) blocks := make([]*types.Block, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) } // Downloaded blocks are always regarded as trusted after the // transition. 
Because the downloaded chain is guided by the @@ -1726,7 +981,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state blocks := make([]*types.Block, len(results)) receipts := make([]types.Receipts, len(results)) for i, result := range results { - blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body()) receipts[i] = result.Receipts } if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil { @@ -1737,7 +992,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state } func (d *Downloader) commitPivotBlock(result *fetchResult) error { - block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) + block := types.NewBlockWithHeader(result.Header).WithBody(result.body()) log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) // Commit the pivot block as the new head, will require full sync from here on @@ -1786,7 +1041,7 @@ func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Hea headers []*types.Header ) for { - parent := d.lightchain.GetHeaderByHash(current.ParentHash) + parent := d.blockchain.GetHeaderByHash(current.ParentHash) if parent == nil { break // The chain is not continuous, or the chain is exhausted } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c810518d56ae..0cbddee6bf7a 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -19,8 +19,6 @@ package downloader import ( "fmt" "math/big" - "os" - "strings" "sync" "sync/atomic" "testing" @@ -44,7 +42,6 @@ import ( // downloadTester is a test simulator for mocking out local block chain. 
type downloadTester struct { - freezer string chain *core.BlockChain downloader *Downloader @@ -59,8 +56,7 @@ func newTester(t *testing.T) *downloadTester { // newTesterWithNotification creates a new downloader test mocker. func newTesterWithNotification(t *testing.T, success func()) *downloadTester { - freezer := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) if err != nil { panic(err) } @@ -77,11 +73,10 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { panic(err) } tester := &downloadTester{ - freezer: freezer, - chain: chain, - peers: make(map[string]*downloadTesterPeer), + chain: chain, + peers: make(map[string]*downloadTesterPeer), } - tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) + tester.downloader = New(db, new(event.TypeMux), tester.chain, tester.dropPeer, success) return tester } @@ -90,27 +85,6 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { func (dl *downloadTester) terminate() { dl.downloader.Terminate() dl.chain.Stop() - - os.RemoveAll(dl.freezer) -} - -// sync starts synchronizing with a remote peer, blocking until it completes. 
-func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - head := dl.peers[id].chain.CurrentBlock() - if td == nil { - // If no particular TD was requested, load from the peer's blockchain - td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64()) - } - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err } // newPeer registers a new block download source into the downloader. @@ -119,10 +93,10 @@ func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block defer dl.lock.Unlock() peer := &downloadTesterPeer{ - dl: dl, - id: id, - chain: newTestBlockchain(blocks), - withholdHeaders: make(map[common.Hash]struct{}), + dl: dl, + id: id, + chain: newTestBlockchain(blocks), + withholdBodies: make(map[common.Hash]struct{}), } dl.peers[id] = peer @@ -146,11 +120,10 @@ func (dl *downloadTester) dropPeer(id string) { } type downloadTesterPeer struct { - dl *downloadTester - id string - chain *core.BlockChain - - withholdHeaders map[common.Hash]struct{} + dl *downloadTester + withholdBodies map[common.Hash]struct{} + id string + chain *core.BlockChain } // Head constructs a function to retrieve a peer's current head hash @@ -186,15 +159,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) 
- break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -230,15 +194,6 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, Reverse: reverse, }, nil) headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } hashes := make([]common.Hash, len(headers)) for i, header := range headers { hashes[i] = header.Hash() @@ -278,7 +233,13 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et ) hasher := trie.NewStackTrie(nil) for i, body := range bodies { - txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + hash := types.DeriveSha(types.Transactions(body.Transactions), hasher) + if _, ok := dlp.withholdBodies[hash]; ok { + txsHashes = append(txsHashes[:i], txsHashes[i+1:]...) + uncleHashes = append(uncleHashes[:i], uncleHashes[i+1:]...) 
+ continue + } + txsHashes[i] = hash uncleHashes[i] = types.CalcUncleHash(body.Uncles) } req := ð.Request{ @@ -423,9 +384,6 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { t.Helper() headers, blocks, receipts := length, length, length - if tester.downloader.getMode() == LightSync { - blocks, receipts = 1, 1 - } if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) } @@ -437,12 +395,14 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } -func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } -func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } +func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } +func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a small enough block chain to download @@ -450,10 +410,15 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + select { + case <-success: + assertOwnChain(t, tester, len(chain.blocks)) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three 
seconds") } - assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if a large batch of blocks are being downloaded, it is throttled @@ -479,7 +444,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Start a synchronisation concurrently errc := make(chan error, 1) go func() { - errc <- tester.sync("peer", nil, mode) + errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil) }() // Iteratively take some blocks, always checking the retrieval count for { @@ -535,132 +500,16 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. -func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } - -func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that synchronising against a much shorter but 
much heavier fork works -// currently and is not dropped. -func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } - -func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) - tester.newPeer("light", protocol, chainA.blocks[1:]) - tester.newPeer("heavy", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("light", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("heavy", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head, ensuring that malicious peers cannot waste resources by feeding -// long dead chains. 
-func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } - -func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA - chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA.blocks[1:]) - tester.newPeer("rewriter", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head for short but heavy forks too. These are a bit special because they -// take different ancestor lookup paths. 
-func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) -} -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) -} -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) -} - -func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a long enough forked chain - chainA := testChainForkLightA - chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - // Tests that a canceled download wipes all previously accumulated state. 
-func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } -func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } -func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } +func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } +func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) @@ -672,46 +521,27 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil, mode); err != nil { + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + <-complete tester.downloader.Cancel() if !tester.downloader.queue.Idle() { t.Errorf("download queue not idle") } } -// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create various peers with various parts of the chain - targetPeers := 8 - chain := testChainBase.shorten(targetPeers * 100) - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. 
-func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } -func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } -func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } +func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } +func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + complete := make(chan struct{}) + success := func() { + close(complete) + } + tester := newTesterWithNotification(t, success) defer tester.terminate() // Create a small enough block chain to download @@ -720,9 +550,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Create peers of every type tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to start beacon sync: #{err}") + } + select { + case <-complete: + break + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") } assertOwnChain(t, tester, len(chain.blocks)) @@ -737,12 +572,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
-func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } -func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } -func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } +func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } +func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) + success := make(chan struct{}) + tester := newTesterWithNotification(t, func() { + close(success) + }) defer tester.terminate() // Create a block chain to download @@ -757,16 +594,25 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.receiptFetchHook = func(headers []*types.Header) { receiptsHave.Add(int32(len(headers))) } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { + + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } + select { + case <-success: + checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ + HighestBlock: uint64(len(chain.blocks) - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + }) + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") + } assertOwnChain(t, tester, len(chain.blocks)) // Validate the number of block bodies that should have been requested bodiesNeeded, receiptsNeeded := 0, 0 for _, block := range chain.blocks[1:] { - if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { + if len(block.Transactions()) > 0 || len(block.Uncles()) > 0 { bodiesNeeded++ } } @@ -783,195 +629,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode 
SyncMode) { } } -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. 
-func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Attempt a full sync with an attacker feeding shifted headers - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) -} -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) -} -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) -} - -func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain.blocks[1:]) - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) - } -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash 
chain was detected as invalid, definitely drop - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester(t) - defer tester.terminate() - chain := testChainBase.shorten(1) - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - tester.newPeer(id, protocol, chain.blocks[1:]) - if _, ok := tester.peers[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) - if _, ok := tester.peers[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } - -func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks)/2 - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks)/2 - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", 
ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) { // Mark this method as a helper to report errors at callsite, not in here t.Helper() @@ -982,296 +639,12 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). -func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: 
uint64(len(chainA.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainA.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainB.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Attempt a full sync with a faulty peer - missing := len(chain.blocks)/2 - 1 - - faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) - faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - panic("succeeded faulty synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress origin remind the same - // after a failure - tester.newPeer("valid", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", afterFailedSync) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, 
tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Create and sync with an attacker that promises a higher chain than available. - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - numMissing := 5 - for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { - attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - panic("succeeded attacker synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress height has been reduced to - // the true value. 
- validChain := chain.shorten(len(chain.blocks) - numMissing) - tester.newPeer("valid", protocol, validChain.blocks[1:]) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(len(validChain.blocks) - 1), - }) - // Check final progress after successful sync. - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(validChain.blocks) - 1), - HighestBlock: uint64(len(validChain.blocks) - 1), - }) -} - -func TestRemoteHeaderRequestSpan(t *testing.T) { - testCases := []struct { - remoteHeight uint64 - localHeight uint64 - expected []int - }{ - // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, - []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, - }, - {15000, 13006, - []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, - }, - // Remote is pretty close to us. We don't have to fetch as many - {1200, 1150, - []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, - }, - // Remote is equal to us (so on a fork with higher td) - // We should get the closest couple of ancestors - {1500, 1500, - []int{1497, 1499}, - }, - // We're higher than the remote! 
Odd - {1000, 1500, - []int{997, 999}, - }, - // Check some weird edgecases that it behaves somewhat rationally - {0, 1500, - []int{0, 2}, - }, - {6000000, 0, - []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, - }, - {0, 0, - []int{0, 2}, - }, - } - reqs := func(from, count, span int) []int { - var r []int - num := from - for len(r) < count { - r = append(r, num) - num += span + 1 - } - return r - } - for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) - - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break - } - } - } - if failed { - res := strings.ReplaceAll(fmt.Sprint(data), " ", ",") - exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",") - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } - } -} - // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - var cases = []struct { name string // The name of testing scenario local int // The length of local chain(canonical chain assumed), 0 means genesis is the head @@ -1312,81 +685,56 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { } } -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of manual head reversion -func TestBeaconForkedSyncProgress68Full(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, FullSync) -} -func TestBeaconForkedSyncProgress68Snap(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, SnapSync) -} -func TestBeaconForkedSyncProgress68Light(t *testing.T) { - testBeaconForkedSyncProgress(t, eth.ETH68, LightSync) -} +// Tests that synchronisation progress (origin block number, current block number +// and highest block number) is tracked and updated correctly. 
+func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func testBeaconForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { +func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) tester := newTesterWithNotification(t, func() { success <- struct{}{} }) defer tester.terminate() + checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) + chain := testChainBase.shorten(blockCacheMaxItems - 15) + shortChain := chain.shorten(len(chain.blocks) / 2).blocks[1:] - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress + // Connect to peer that provides all headers and part of the bodies + faultyPeer := tester.newPeer("peer-half", protocol, shortChain) + for _, header := range shortChain { + faultyPeer.withholdBodies[header.Hash()] = struct{}{} } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainA.blocks[len(chainA.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - - <-starting - progress <- struct{}{} + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } select { case <-success: - checkProgress(t, tester.downloader, 
"initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - CurrentBlock: uint64(len(chainA.blocks) - 1), + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-half", ethereum.SyncProgress{ + CurrentBlock: uint64(len(chain.blocks)/2 - 1), + HighestBlock: uint64(len(chain.blocks)/2 - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") } - // Set the head to a second fork - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.downloader.BeaconSync(mode, chainB.blocks[len(chainB.blocks)-1].Header(), nil); err != nil { - panic(fmt.Sprintf("failed to beacon sync: %v", err)) - } - }() - - <-starting - progress <- struct{}{} + // Synchronise all the blocks and check continuation progress + tester.newPeer("peer-full", protocol, chain.blocks[1:]) + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { + t.Fatalf("failed to beacon-sync chain: %v", err) + } + startingBlock := uint64(len(chain.blocks)/2 - 1) - // reorg below available state causes the state sync to rewind to genesis select { case <-success: - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainB.blocks) - 1), - CurrentBlock: uint64(len(chainB.blocks) - 1), - StartingBlock: 0, + // Ok, downloader fully cancelled after sync cycle + checkProgress(t, tester.downloader, "peer-full", ethereum.SyncProgress{ + StartingBlock: startingBlock, + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) case <-time.NewTimer(time.Second * 3).C: t.Fatalf("Failed to sync chain in three seconds") diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go index cc4279b0da7a..4ebb9bbc98a4 100644 --- a/eth/downloader/fetchers.go +++ b/eth/downloader/fetchers.go @@ -68,48 +68,3 @@ func (d 
*Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil } } - -// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which -// handles all the cancellation, interruption and timeout mechanisms of a data -// retrieval to allow blocking API calls. -func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { - // Create the response sink and send the network request - start := time.Now() - resCh := make(chan *eth.Response) - - req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh) - if err != nil { - return nil, nil, err - } - defer req.Close() - - // Wait until the response arrives, the request is cancelled or times out - ttl := d.peers.rates.TargetTimeout() - - timeoutTimer := time.NewTimer(ttl) - defer timeoutTimer.Stop() - - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case <-timeoutTimer.C: - // Header retrieval timed out, update the metrics - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) - - return nil, nil, errTimeout - - case res := <-resCh: - // Headers successfully retrieved, update the metrics - headerReqTimer.Update(time.Since(start)) - headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest)))) - - // Don't reject the packet even if it turns out to be bad, downloader will - // disconnect the peer on its own terms. 
Simply delivery the headers to - // be processed by the caller - res.Done <- nil - - return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil - } -} diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 649aa2761596..9d8cd114c12a 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -76,7 +76,7 @@ type typedQueue interface { // concurrentFetch iteratively downloads scheduled block parts, taking available // peers, reserving a chunk of fetch requests for each and waiting for delivery // or timeouts. -func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { +func (d *Downloader) concurrentFetch(queue typedQueue) error { // Create a delivery channel to accept responses from all peers responses := make(chan *eth.Response) @@ -126,10 +126,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { // Prepare the queue and fetch block parts until the block header fetcher's done finished := false for { - // Short circuit if we lost all our peers - if d.peers.Len() == 0 && !beaconMode { - return errNoPeers - } // If there's nothing more to fetch, wait or terminate if queue.pending() == 0 { if len(pending) == 0 && finished { @@ -158,27 +154,20 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { } sort.Sort(&peerCapacitySort{idles, caps}) - var ( - progressed bool - throttled bool - queued = queue.pending() - ) + var throttled bool for _, peer := range idles { // Short circuit if throttling activated or there are no more // queued tasks to be retrieved if throttled { break } - if queued = queue.pending(); queued == 0 { + if queued := queue.pending(); queued == 0 { break } // Reserve a chunk of fetches for a peer. A nil can mean either that // no more headers are available, or that the peer is known not to // have them. 
- request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) - if progress { - progressed = true - } + request, _, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) if throttle { throttled = true throttleCounter.Inc(1) @@ -207,11 +196,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { timeout.Reset(ttl) } } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode { - return errPeersUnavailable - } } // Wait for something to happen select { @@ -315,16 +299,6 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { queue.updateCapacity(peer, 0, 0) } else { d.dropPeer(peer.id) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := peer.id == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } } case res := <-responses: diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go index 5105fda66b3a..56359b33c94e 100644 --- a/eth/downloader/fetchers_concurrent_bodies.go +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -78,7 +78,6 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan if q.bodyFetchHook != nil { q.bodyFetchHook(req.Headers) } - hashes := make([]common.Hash, 0, len(req.Headers)) for _, header := range req.Headers { hashes = append(hashes, header.Hash()) diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go deleted file mode 100644 index 8201f4ca7423..000000000000 --- a/eth/downloader/fetchers_concurrent_headers.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum 
library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/log" -) - -// headerQueue implements typedQueue and is a type adapter between the generic -// concurrent fetcher and the downloader. -type headerQueue Downloader - -// waker returns a notification channel that gets pinged in case more header -// fetches have been queued up, so the fetcher might assign it to idle peers. -func (q *headerQueue) waker() chan bool { - return q.queue.headerContCh -} - -// pending returns the number of headers that are currently queued for fetching -// by the concurrent downloader. -func (q *headerQueue) pending() int { - return q.queue.PendingHeaders() -} - -// capacity is responsible for calculating how many headers a particular peer is -// estimated to be able to retrieve within the allotted round trip time. -func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { - return peer.HeaderCapacity(rtt) -} - -// updateCapacity is responsible for updating how many headers a particular peer -// is estimated to be able to retrieve in a unit time. 
-func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) { - peer.UpdateHeaderRate(items, span) -} - -// reserve is responsible for allocating a requested number of pending headers -// from the download queue to the specified peer. -func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) { - return q.queue.ReserveHeaders(peer, items), false, false -} - -// unreserve is responsible for removing the current header retrieval allocation -// assigned to a specific peer and placing it back into the pool to allow -// reassigning to some other peer. -func (q *headerQueue) unreserve(peer string) int { - fails := q.queue.ExpireHeaders(peer) - if fails > 2 { - log.Trace("Header delivery timed out", "peer", peer) - } else { - log.Debug("Header delivery stalling", "peer", peer) - } - return fails -} - -// request is responsible for converting a generic fetch request into a header -// one and sending it to the remote peer for fulfillment. -func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) { - peer.log.Trace("Requesting new batch of headers", "from", req.From) - return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh) -} - -// deliver is responsible for taking a generic response packet from the concurrent -// fetcher, unpacking the header data and delivering it to the downloader's queue. 
-func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { - headers := *packet.Res.(*eth.BlockHeadersRequest) - hashes := packet.Meta.([]common.Hash) - - accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) - switch { - case err == nil && len(headers) == 0: - peer.log.Trace("Requested headers delivered") - case err == nil: - peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted) - default: - peer.log.Debug("Failed to deliver retrieved headers", "err", err) - } - return accepted, err -} diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index d388b9ee4d46..9d8e1f313c24 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -23,13 +23,12 @@ import "fmt" type SyncMode uint32 const ( - FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - SnapSync // Download the chain and the state via compact snapshots - LightSync // Download only the headers and terminate afterwards + FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks + SnapSync // Download the chain and the state via compact snapshots ) func (mode SyncMode) IsValid() bool { - return mode >= FullSync && mode <= LightSync + return mode == FullSync || mode == SnapSync } // String implements the stringer interface. 
@@ -39,8 +38,6 @@ func (mode SyncMode) String() string { return "full" case SnapSync: return "snap" - case LightSync: - return "light" default: return "unknown" } @@ -52,8 +49,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) { return []byte("full"), nil case SnapSync: return []byte("snap"), nil - case LightSync: - return []byte("light"), nil default: return nil, fmt.Errorf("unknown sync mode %d", mode) } @@ -65,10 +60,8 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { *mode = FullSync case "snap": *mode = SnapSync - case "light": - *mode = LightSync default: - return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or "light"`, text) + return fmt.Errorf(`unknown sync mode %q, want "full" or "snap"`, text) } return nil } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 6ff858d7553e..267c23407f43 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -87,6 +87,15 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult { return item } +// body returns a representation of the fetch result as a types.Body object. +func (f *fetchResult) body() types.Body { + return types.Body{ + Transactions: f.Transactions, + Uncles: f.Uncles, + Withdrawals: f.Withdrawals, + } +} + // SetBodyDone flags the body as finished. 
func (f *fetchResult) SetBodyDone() { if v := f.pending.Load(); (v & (1 << bodyType)) != 0 { diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 3693ab095ff6..4aa97cf1f797 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -376,20 +377,9 @@ func TestSkeletonSyncInit(t *testing.T) { skeleton.Terminate() // Ensure the correct resulting sync status - var progress skeletonProgress - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - - if len(progress.Subchains) != len(tt.newstate) { - t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) - continue - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.newstate[j].Head { - t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) - } - if progress.Subchains[j].Tail != tt.newstate[j].Tail { - t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) - } + expect := skeletonExpect{state: tt.newstate} + if err := checkSkeletonProgress(db, false, nil, expect); err != nil { + t.Errorf("test %d: %v", i, err) } } } @@ -493,28 +483,36 @@ func TestSkeletonSyncExtend(t *testing.T) { skeleton.Terminate() // Ensure the correct resulting sync status - var progress skeletonProgress - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - - if len(progress.Subchains) != len(tt.newstate) { - t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) - continue - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head 
!= tt.newstate[j].Head { - t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) - } - if progress.Subchains[j].Tail != tt.newstate[j].Tail { - t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) - } + expect := skeletonExpect{state: tt.newstate} + if err := checkSkeletonProgress(db, false, nil, expect); err != nil { + t.Errorf("test %d: %v", i, err) } } } +type skeletonExpect struct { + state []*subchain // Expected sync state after the post-init event + serve uint64 // Expected number of header retrievals after initial cycle + drop uint64 // Expected number of peers dropped after initial cycle +} + +type skeletonTest struct { + fill bool // Whether to run a real backfiller in this test case + unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments + + head *types.Header // New head header to announce to reorg to + peers []*skeletonTestPeer // Initial peer set to start the sync with + mid skeletonExpect + + newHead *types.Header // New header to anoint on top of the old one + newPeer *skeletonTestPeer // New peer to join the skeleton syncer + end skeletonExpect +} + // Tests that the skeleton sync correctly retrieves headers from one or more // peers without duplicates or other strange side effects. func TestSkeletonSyncRetrievals(t *testing.T) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //log.SetDefault(log.NewLogger(log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false)))) // Since skeleton headers don't need to be meaningful, beyond a parent hash // progression, create a long fake chain to test with. 
@@ -537,22 +535,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { Extra: []byte("B"), // force a different hash }) } - tests := []struct { - fill bool // Whether to run a real backfiller in this test case - unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments - - head *types.Header // New head header to announce to reorg to - peers []*skeletonTestPeer // Initial peer set to start the sync with - midstate []*subchain // Expected sync state after initial cycle - midserve uint64 // Expected number of header retrievals after initial cycle - middrop uint64 // Expected number of peers dropped after initial cycle - - newHead *types.Header // New header to anoint on top of the old one - newPeer *skeletonTestPeer // New peer to join the skeleton syncer - endstate []*subchain // Expected sync state after the post-init event - endserve uint64 // Expected number of header retrievals after the post-init event - enddrop uint64 // Expected number of peers dropped after the post-init event - }{ + tests := []skeletonTest{ // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. No peers however, so // the sync should be stuck without any progression. @@ -560,12 +543,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // When a new peer is added, it should detect the join and fill the headers // to the genesis block. 
{ - head: chain[len(chain)-1], - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}}, + head: chain[len(chain)-1], + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}}, + }, - newPeer: newSkeletonTestPeer("test-peer", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + newPeer: newSkeletonTestPeer("test-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. With one valid peer, @@ -573,14 +560,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // // Adding a second peer should not have any effect. { - head: chain[len(chain)-1], - peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)}, - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - midserve: uint64(len(chain) - 2), // len - head - genesis - - newPeer: newSkeletonTestPeer("test-peer-2", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + head: chain[len(chain)-1], + peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)}, + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, + + newPeer: newSkeletonTestPeer("test-peer-2", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. 
With many valid peers, @@ -594,12 +585,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { newSkeletonTestPeer("test-peer-2", chain), newSkeletonTestPeer("test-peer-3", chain), }, - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - midserve: uint64(len(chain) - 2), // len - head - genesis + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, - newPeer: newSkeletonTestPeer("test-peer-4", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + newPeer: newSkeletonTestPeer("test-peer-4", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // This test checks if a peer tries to withhold a header - *on* the sync // boundary - instead of sending the requested amount. The malicious short @@ -611,14 +606,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 3, // len - head - genesis - missing - middrop: 1, // penalize shortened header deliveries + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 3, // len - head - genesis - missing + drop: 1, // penalize shortened header deliveries + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: 
(requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to withhold a header - *off* the sync // boundary - instead of sending the requested amount. The malicious short @@ -630,14 +629,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 3, // len - head - genesis - missing - middrop: 1, // penalize shortened header deliveries + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 3, // len - head - genesis - missing + drop: 1, // penalize shortened header deliveries + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to duplicate a header - *on* the sync // boundary - instead of sending the correct sequence. 
The malicious duped @@ -649,14 +652,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // penalize invalid header sequences + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // penalize invalid header sequences + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to duplicate a header - *off* the sync // boundary - instead of sending the correct sequence. 
The malicious duped @@ -668,14 +675,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // penalize invalid header sequences + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // penalize invalid header sequences + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to inject a different header - *on* // the sync boundary - instead of sending the correct sequence. The bad @@ -698,14 +709,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { ), ), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync? + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync? 
+ }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to inject a different header - *off* // the sync boundary - instead of sending the correct sequence. The bad @@ -728,14 +743,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { ), ), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // different set of headers, drop + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // different set of headers, drop + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test reproduces a bug caught during review (kudos to @holiman) // where a subchain is merged with a previously interrupted one, causing @@ -765,12 +784,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { return nil // Fallback to default behavior, just delayed }), }, - midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}}, - midserve: 2*requestHeaders - 1, // len - head - genesis + mid: skeletonExpect{ + state: 
[]*subchain{{Head: 2 * requestHeaders, Tail: 1}}, + serve: 2*requestHeaders - 1, // len - head - genesis + }, - newHead: chain[2*requestHeaders+2], - endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}}, - endserve: 4 * requestHeaders, + newHead: chain[2*requestHeaders+2], + end: skeletonExpect{ + state: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}}, + serve: 4 * requestHeaders, + }, }, // This test reproduces a bug caught by (@rjl493456442) where a skeleton // header goes missing, causing the sync to get stuck and/or panic. @@ -792,13 +815,17 @@ func TestSkeletonSyncRetrievals(t *testing.T) { fill: true, unpredictable: true, // We have good and bad peer too, bad may be dropped, test too short for certainty - head: chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2 - peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)}, - midstate: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}}, + head: chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2 + peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)}, + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}}, + }, - newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4 - newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain), - endstate: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}}, + newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4 + newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}}, + }, }, } for i, tt := range tests { @@ -861,115 +888,83 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton := newSkeleton(db, peerset, drop, filler) skeleton.Sync(tt.head, nil, true) - var progress skeletonProgress // Wait a bit (bleah) for the initial 
sync loop to go to idle. This might // be either a finish or a never-start hence why there's no event to hook. - check := func() error { - if len(progress.Subchains) != len(tt.midstate) { - return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate)) - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.midstate[j].Head { - return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head) - } - if progress.Subchains[j].Tail != tt.midstate[j].Tail { - return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail) - } - } - return nil - } - waitStart := time.Now() for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 { time.Sleep(waitTime) - // Check the post-init end state if it matches the required results - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - if err := check(); err == nil { + if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err == nil { break } } - if err := check(); err != nil { - t.Error(err) + if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err != nil { + t.Errorf("test %d, mid: %v", i, err) continue } - if !tt.unpredictable { - var served uint64 - for _, peer := range tt.peers { - served += peer.served.Load() - } - if served != tt.midserve { - t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve) - } - var drops uint64 - for _, peer := range tt.peers { - drops += peer.dropped.Load() - } - if drops != tt.middrop { - t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) - } - } + // Apply the post-init events if there's any - if tt.newHead != nil { - skeleton.Sync(tt.newHead, nil, true) - } + endpeers := 
tt.peers if tt.newPeer != nil { if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH68, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } + time.Sleep(time.Millisecond * 50) // given time for peer registration + endpeers = append(tt.peers, tt.newPeer) + } + if tt.newHead != nil { + skeleton.Sync(tt.newHead, nil, true) } + // Wait a bit (bleah) for the second sync loop to go to idle. This might // be either a finish or a never-start hence why there's no event to hook. - check = func() error { - if len(progress.Subchains) != len(tt.endstate) { - return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate)) - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.endstate[j].Head { - return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head) - } - if progress.Subchains[j].Tail != tt.endstate[j].Tail { - return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail) - } - } - return nil - } waitStart = time.Now() for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 { time.Sleep(waitTime) - // Check the post-init end state if it matches the required results - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - if err := check(); err == nil { + if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err == nil { break } } - if err := check(); err != nil { - t.Error(err) + if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err != nil { + t.Errorf("test %d, end: %v", i, err) continue } // Check that the peers served no more headers than we actually needed - if !tt.unpredictable { - served := uint64(0) - for _, peer := range tt.peers { - 
served += peer.served.Load() - } - if tt.newPeer != nil { - served += tt.newPeer.served.Load() - } - if served != tt.endserve { - t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve) - } - drops := uint64(0) - for _, peer := range tt.peers { - drops += peer.dropped.Load() - } - if tt.newPeer != nil { - drops += tt.newPeer.dropped.Load() - } - if drops != tt.enddrop { - t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) - } - } // Clean up any leftover skeleton sync resources skeleton.Terminate() } } + +func checkSkeletonProgress(db ethdb.KeyValueReader, unpredictable bool, peers []*skeletonTestPeer, expected skeletonExpect) error { + var progress skeletonProgress + // Check the post-init end state if it matches the required results + json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) + + if len(progress.Subchains) != len(expected.state) { + return fmt.Errorf("subchain count mismatch: have %d, want %d", len(progress.Subchains), len(expected.state)) + } + for j := 0; j < len(progress.Subchains); j++ { + if progress.Subchains[j].Head != expected.state[j].Head { + return fmt.Errorf("subchain %d head mismatch: have %d, want %d", j, progress.Subchains[j].Head, expected.state[j].Head) + } + if progress.Subchains[j].Tail != expected.state[j].Tail { + return fmt.Errorf("subchain %d tail mismatch: have %d, want %d", j, progress.Subchains[j].Tail, expected.state[j].Tail) + } + } + if !unpredictable { + var served uint64 + for _, peer := range peers { + served += peer.served.Load() + } + if served != expected.serve { + return fmt.Errorf("served headers mismatch: have %d, want %d", served, expected.serve) + } + var drops uint64 + for _, peer := range peers { + drops += peer.dropped.Load() + } + if drops != expected.drop { + return fmt.Errorf("dropped peers mismatch: have %d, want %d", drops, expected.drop) + } + } + return nil +} diff --git a/eth/downloader/testchain_test.go 
b/eth/downloader/testchain_test.go index 46f3febd8ba8..6043f5137231 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -58,7 +58,6 @@ var pregenerated bool func init() { // Reduce some of the parameters to make the tester faster fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 blockCacheMaxItems = 1024 fsHeaderSafetyNet = 256 fsHeaderContCheck = 500 * time.Millisecond diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go index f07f98956e3c..ac3b59e97e8b 100644 --- a/eth/gasestimator/gasestimator.go +++ b/eth/gasestimator/gasestimator.go @@ -80,6 +80,16 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin } available.Sub(available, call.Value) } + if opts.Config.IsCancun(opts.Header.Number, opts.Header.Time) && len(call.BlobHashes) > 0 { + blobGasPerBlob := new(big.Int).SetInt64(params.BlobTxBlobGasPerBlob) + blobBalanceUsage := new(big.Int).SetInt64(int64(len(call.BlobHashes))) + blobBalanceUsage.Mul(blobBalanceUsage, blobGasPerBlob) + blobBalanceUsage.Mul(blobBalanceUsage, call.BlobGasFeeCap) + if blobBalanceUsage.Cmp(available) >= 0 { + return 0, nil, core.ErrInsufficientFunds + } + available.Sub(available, blobBalanceUsage) + } allowance := new(big.Int).Div(available, feeCap) // If the allowance is larger than maximum uint64, skip checking diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 0410ae6b2de3..1e625e21c029 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -44,6 +44,8 @@ const ( // maxBlockFetchers is the max number of goroutines to spin up to pull blocks // for the fee history calculation (mostly relevant for LES). maxBlockFetchers = 4 + // maxQueryLimit is the max number of requested percentiles. 
+ maxQueryLimit = 100 ) // blockFees represents a single block for processing @@ -240,6 +242,9 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL if len(rewardPercentiles) != 0 { maxFeeHistory = oracle.maxBlockHistory } + if len(rewardPercentiles) > maxQueryLimit { + return common.Big0, nil, nil, nil, nil, nil, fmt.Errorf("%w: over the query limit %d", errInvalidPercentile, maxQueryLimit) + } if blocks > maxFeeHistory { log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) blocks = maxFeeHistory diff --git a/eth/handler.go b/eth/handler.go index c7c582af407b..d5117584c001 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/triedb/pathdb" - "golang.org/x/crypto/sha3" ) const ( @@ -181,7 +180,7 @@ func newHandler(config *handlerConfig) (*handler, error) { return nil, errors.New("snap sync not supported with snapshots disabled") } // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) + h.downloader = downloader.New(config.Database, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures) fetchTx := func(peer string, hashes []common.Hash) error { p := h.peers.peer(peer) @@ -480,7 +479,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) ) for _, tx := range txs { diff --git a/eth/protocols/snap/gentrie.go b/eth/protocols/snap/gentrie.go index 81c2640b62f3..6255fb221db1 100644 --- a/eth/protocols/snap/gentrie.go +++ b/eth/protocols/snap/gentrie.go @@ -164,7 +164,7 @@ func (t *pathTrie) deleteAccountNode(path 
[]byte, inner bool) { } else { accountOuterLookupGauge.Inc(1) } - if !rawdb.ExistsAccountTrieNode(t.db, path) { + if !rawdb.HasAccountTrieNode(t.db, path) { return } if inner { @@ -181,7 +181,7 @@ func (t *pathTrie) deleteStorageNode(path []byte, inner bool) { } else { storageOuterLookupGauge.Inc(1) } - if !rawdb.ExistsStorageTrieNode(t.db, t.owner, path) { + if !rawdb.HasStorageTrieNode(t.db, t.owner, path) { return } if inner { diff --git a/eth/protocols/snap/progress_test.go b/eth/protocols/snap/progress_test.go index 9d923bd2f507..1d9a6b8474f8 100644 --- a/eth/protocols/snap/progress_test.go +++ b/eth/protocols/snap/progress_test.go @@ -80,7 +80,7 @@ func makeLegacyProgress() legacyProgress { Next: common.Hash{}, Last: common.Hash{0x77}, SubTasks: map[common.Hash][]*legacyStorageTask{ - common.Hash{0x1}: { + {0x1}: { { Next: common.Hash{}, Last: common.Hash{0xff}, diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index b0ddb8e403f7..88d7d34dcc66 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) const ( @@ -104,7 +103,7 @@ var ( // to allow concurrent retrievals. accountConcurrency = 16 - // storageConcurrency is the number of chunks to split the a large contract + // storageConcurrency is the number of chunks to split a large contract // storage trie into to allow concurrent retrievals. 
storageConcurrency = 16 ) @@ -2359,7 +2358,7 @@ func (s *Syncer) commitHealer(force bool) { } batch := s.db.NewBatch() if err := s.healer.scheduler.Commit(batch); err != nil { - log.Error("Failed to commit healing data", "err", err) + log.Crit("Failed to commit healing data", "err", err) } if err := batch.Write(); err != nil { log.Crit("Failed to persist healing data", "err", err) @@ -2653,7 +2652,7 @@ func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) @@ -2901,7 +2900,7 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error // Cross reference the requested trienodes with the response to find gaps // that the serving node is missing var ( - hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher = crypto.NewKeccakState() hash = make([]byte, 32) nodes = make([][]byte, len(req.hashes)) fills uint64 @@ -3007,7 +3006,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e // Cross reference the requested bytecodes with the response to find gaps // that the serving node is missing - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() hash := make([]byte, 32) codes := make([][]byte, len(req.hashes)) @@ -3251,9 +3250,9 @@ func (t *healRequestSort) Merge() []TrieNodePathSet { // sortByAccountPath takes hashes and paths, and sorts them. After that, it generates // the TrieNodePaths and merges paths which belongs to the same account path. 
func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) { - var syncPaths []trie.SyncPath - for _, path := range paths { - syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path))) + syncPaths := make([]trie.SyncPath, len(paths)) + for i, path := range paths { + syncPaths[i] = trie.NewSyncPath([]byte(path)) } n := &healRequestSort{paths, hashes, syncPaths} sort.Sort(n) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index f35babb73109..5f6826373a90 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -64,7 +64,7 @@ func TestHashing(t *testing.T) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() @@ -96,7 +96,7 @@ func BenchmarkHashing(b *testing.B) { } } var new = func() { - hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hasher := crypto.NewKeccakState() var hash = make([]byte, 32) for i := 0; i < len(bytecodes); i++ { hasher.Reset() diff --git a/eth/tracers/api.go b/eth/tracers/api.go index d99531d48fc9..51b55ffdbb1b 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -22,7 +22,6 @@ import ( "encoding/json" "errors" "fmt" - "math/big" "os" "runtime" "sync" @@ -805,9 +804,13 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block // Execute the transaction and flush any traces to disk vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) statedb.SetTxContext(tx.Hash(), i) - vmConf.Tracer.OnTxStart(vmenv.GetVMContext(), tx, msg.From) + if vmConf.Tracer.OnTxStart != nil { + vmConf.Tracer.OnTxStart(vmenv.GetVMContext(), tx, msg.From) + } vmRet, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) - vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err) + if vmConf.Tracer.OnTxEnd != nil { + 
vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err) + } if writer != nil { writer.Flush() } @@ -982,7 +985,8 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor return nil, err } } - vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: big.NewInt(0)}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) + // The actual TxContext will be created as part of ApplyTransactionWithEVM. + vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) statedb.SetLogger(tracer.Hooks) // Define a meaningful timeout of a single transaction trace diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 36caee0dda45..6fbb50848d63 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -32,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -311,7 +312,7 @@ func TestTraceCall(t *testing.T) { config: &TraceCallConfig{TxIndex: uintPtr(1)}, expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), }, - // After the target transaction, should be succeed + // After the target transaction, should be succeeded { blockNumber: rpc.BlockNumber(genBlocks - 1), call: ethapi.TransactionArgs{ @@ -994,3 +995,90 @@ func TestTraceChain(t *testing.T) { } } } + +// newTestMergedBackend creates a post-merge chain +func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { + backend := &testBackend{ + chainConfig: 
gspec.Config, + engine: beacon.NewFaker(), + chaindb: rawdb.NewMemoryDatabase(), + } + // Generate blocks for testing + _, blocks, _ := core.GenerateChainWithGenesis(gspec, backend.engine, n, generator) + + // Import the canonical chain + cacheConfig := &core.CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 0, + TrieDirtyDisabled: true, // Archive mode + } + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + backend.chain = chain + return backend +} + +func TestTraceBlockWithBasefee(t *testing.T) { + t.Parallel() + accounts := newAccounts(1) + target := common.HexToAddress("0x1111111111111111111111111111111111111111") + genesis := &core.Genesis{ + Config: params.AllDevChainProtocolChanges, + Alloc: types.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(1 * params.Ether)}, + target: {Nonce: 1, Code: []byte{ + byte(vm.BASEFEE), byte(vm.STOP), + }}, + }, + } + genBlocks := 1 + signer := types.HomesteadSigner{} + var txHash common.Hash + var baseFee = new(big.Int) + backend := newTestMergedBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: uint64(i), + To: &target, + Value: big.NewInt(0), + Gas: 5 * params.TxGas, + GasPrice: b.BaseFee(), + Data: nil}), + signer, accounts[0].key) + b.AddTx(tx) + txHash = tx.Hash() + baseFee.Set(b.BaseFee()) + }) + defer backend.chain.Stop() + api := NewAPI(backend) + + var testSuite = []struct { + blockNumber rpc.BlockNumber + config *TraceConfig + want string + }{ + // Trace head block + { + blockNumber: rpc.BlockNumber(genBlocks), + want: 
fmt.Sprintf(`[{"txHash":"%#x","result":{"gas":21002,"failed":false,"returnValue":"","structLogs":[{"pc":0,"op":"BASEFEE","gas":84000,"gasCost":2,"depth":1,"stack":[]},{"pc":1,"op":"STOP","gas":83998,"gasCost":0,"depth":1,"stack":["%#x"]}]}}]`, txHash, baseFee), + }, + } + for i, tc := range testSuite { + result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config) + if err != nil { + t.Errorf("test %d, want no error, have %v", i, err) + continue + } + have, _ := json.Marshal(result) + want := tc.want + if string(have) != want { + t.Errorf("test %d, result mismatch\nhave: %v\nwant: %v\n", i, string(have), want) + } + } +} diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go new file mode 100644 index 000000000000..2d4f1b089006 --- /dev/null +++ b/eth/tracers/internal/tracetest/supply_test.go @@ -0,0 +1,613 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package tracetest + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math/big" + "os" + "path" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" + + // Force-load live packages, to trigger registration + _ "github.com/ethereum/go-ethereum/eth/tracers/live" +) + +type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` +} + +type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` +} + +type supplyInfo struct { + Issuance *supplyInfoIssuance `json:"issuance,omitempty"` + Burn *supplyInfoBurn `json:"burn,omitempty"` + + // Block info + Number uint64 `json:"blockNumber"` + Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash"` +} + +func emptyBlockGenerationFunc(b *core.BlockGen) {} + +func TestSupplyOmittedFields(t *testing.T) { + var ( + config = *params.MergedTestChainConfig + gspec = &core.Genesis{ + Config: &config, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + + out, _, err := testSupplyTracer(t, gspec, func(b *core.BlockGen) { + b.SetPoS() + }) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + expected := supplyInfo{ + Number: 0, + Hash: common.HexToHash("0x52f276d96f0afaaf2c3cb358868bdc2779c4b0cb8de3e7e5302e247c0b66a703"), + ParentHash: 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyGenesisAlloc(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + config = *params.AllEthashProtocolChanges + + gspec = &core.Genesis{ + Config: &config, + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + addr2: {Balance: eth1}, + }, + } + ) + + expected := supplyInfo{ + Issuance: &supplyInfoIssuance{ + GenesisAlloc: (*hexutil.Big)(new(big.Int).Mul(common.Big2, big.NewInt(params.Ether))), + }, + Number: 0, + Hash: common.HexToHash("0xbcc9466e9fc6a8b56f4b29ca353a421ff8b51a0c1a58ca4743b427605b08f2ca"), + ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + + out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyRewards(t *testing.T) { + var ( + config = *params.AllEthashProtocolChanges + + gspec = &core.Genesis{ + Config: &config, + } + ) + + expected := supplyInfo{ + Issuance: &supplyInfoIssuance{ + Reward: (*hexutil.Big)(new(big.Int).Mul(common.Big2, big.NewInt(params.Ether))), + }, + Number: 1, + Hash: common.HexToHash("0xcbb08370505be503dafedc4e96d139ea27aba3cbc580148568b8a307b3f51052"), + ParentHash: common.HexToHash("0xadeda0a83e337b6c073e3f0e9a17531a04009b397a9588c093b628f21b8bc5a3"), + } + + out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply 
tracer: %v", err) + } + + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyEip1559Burn(t *testing.T) { + var ( + config = *params.AllEthashProtocolChanges + + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + // A sender who makes transactions, has some eth1 + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + }, + } + ) + + signer := types.LatestSigner(gspec.Config) + + eip1559BlockGenerationFunc := func(b *core.BlockGen) { + txdata := &types.DynamicFeeTx{ + ChainID: gspec.Config.ChainID, + Nonce: 0, + To: &aa, + Gas: 21000, + GasFeeCap: gwei5, + GasTipCap: big.NewInt(2), + } + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + out, chain, err := testSupplyTracer(t, gspec, eip1559BlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + var ( + head = chain.CurrentBlock() + reward = new(big.Int).Mul(common.Big2, big.NewInt(params.Ether)) + burn = new(big.Int).Mul(big.NewInt(21000), head.BaseFee) + expected = supplyInfo{ + Issuance: &supplyInfoIssuance{ + Reward: (*hexutil.Big)(reward), + }, + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(burn), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + ) + + actual := out[expected.Number] + compareAsJSON(t, expected, actual) +} + +func TestSupplyWithdrawals(t *testing.T) { + var ( + config = *params.MergedTestChainConfig + gspec = &core.Genesis{ + Config: &config, + } + ) + + withdrawalsBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + b.AddWithdrawal(&types.Withdrawal{ 
+ Validator: 42, + Address: common.Address{0xee}, + Amount: 1337, + }) + } + + out, chain, err := testSupplyTracer(t, gspec, withdrawalsBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + var ( + head = chain.CurrentBlock() + expected = supplyInfo{ + Issuance: &supplyInfoIssuance{ + Withdrawals: (*hexutil.Big)(big.NewInt(1337000000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + actual = out[expected.Number] + ) + + compareAsJSON(t, expected, actual) +} + +// Tests fund retrieval after contract's selfdestruct. +// Contract A calls contract B which selfdestructs, but B receives eth1 +// after the selfdestruct opcode executes from Contract A. +// Because Contract B is removed only at the end of the transaction +// the ether sent in between is burnt before Cancun hard fork. +func TestSupplySelfdestruct(t *testing.T) { + var ( + config = *params.TestChainConfig + + aa = common.HexToAddress("0x1111111111111111111111111111111111111111") + bb = common.HexToAddress("0x2222222222222222222222222222222222222222") + dad = common.HexToAddress("0x0000000000000000000000000000000000000dad") + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + aa: { + Code: common.FromHex("0x61face60f01b6000527322222222222222222222222222222222222222226000806002600080855af160008103603457600080fd5b60008060008034865af1905060008103604c57600080fd5b5050"), + // Nonce: 0, + Balance: big.NewInt(0), + }, + bb: { + Code: common.FromHex("0x6000357fface000000000000000000000000000000000000000000000000000000000000808203602f57610dad80ff5b5050"), + Nonce: 0, + 
Balance: eth1, + }, + }, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + + signer := types.LatestSigner(gspec.Config) + + testBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + txdata := &types.LegacyTx{ + Nonce: 0, + To: &aa, + Value: gwei5, + Gas: 150000, + GasPrice: gwei5, + Data: []byte{}, + } + + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + // 1. Test pre Cancun + preCancunOutput, preCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("Pre-cancun failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. 0x0000...000dad has 1 ether + // 2. A has 0 ether + // 3. B has 0 ether + statedb, _ := preCancunChain.State() + if got, exp := statedb.GetBalance(dad), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", dad, got, exp) + } + if got, exp := statedb.GetBalance(aa), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + + head := preCancunChain.CurrentBlock() + // Check live trace output + expected := supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(big.NewInt(55289500000000)), + Misc: (*hexutil.Big)(big.NewInt(5000000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + + actual := preCancunOutput[expected.Number] + + compareAsJSON(t, expected, actual) + + // 2. Test post Cancun + cancunTime := uint64(0) + gspec.Config.ShanghaiTime = &cancunTime + gspec.Config.CancunTime = &cancunTime + + postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("Post-cancun failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. 
0x0000...000dad has 1 ether + // 3. A has 0 ether + // 3. B has 5 gwei + statedb, _ = postCancunChain.State() + if got, exp := statedb.GetBalance(dad), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("Post-shanghai address \"%v\" balance, got %v exp %v\n", dad, got, exp) + } + if got, exp := statedb.GetBalance(aa), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Post-shanghai address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), gwei5; got.CmpBig(exp) != 0 { + t.Fatalf("Post-shanghai address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + + // Check live trace output + head = postCancunChain.CurrentBlock() + expected = supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(big.NewInt(55289500000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + + actual = postCancunOutput[expected.Number] + + compareAsJSON(t, expected, actual) +} + +// Tests selfdestructing contract to send its balance to itself (burn). +// It tests both cases of selfdestructing succeding and being reverted. +// - Contract A calls B and D. +// - Contract B selfdestructs and sends the eth1 to itself (Burn amount to be counted). +// - Contract C selfdestructs and sends the eth1 to itself. +// - Contract D calls C and reverts (Burn amount of C +// has to be reverted as well). 
+func TestSupplySelfdestructItselfAndRevert(t *testing.T) { + var ( + config = *params.TestChainConfig + + aa = common.HexToAddress("0x1111111111111111111111111111111111111111") + bb = common.HexToAddress("0x2222222222222222222222222222222222222222") + cc = common.HexToAddress("0x3333333333333333333333333333333333333333") + dd = common.HexToAddress("0x4444444444444444444444444444444444444444") + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + eth2 = new(big.Int).Mul(common.Big2, big.NewInt(params.Ether)) + eth5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + // BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + aa: { + // Contract code in YUL: + // + // object "ContractA" { + // code { + // let B := 0x2222222222222222222222222222222222222222 + // let D := 0x4444444444444444444444444444444444444444 + + // // Call to Contract B + // let resB:= call(gas(), B, 0, 0x0, 0x0, 0, 0) + + // // Call to Contract D + // let resD := call(gas(), D, 0, 0x0, 0x0, 0, 0) + // } + // } + Code: common.FromHex("0x73222222222222222222222222222222222222222273444444444444444444444444444444444444444460006000600060006000865af160006000600060006000865af150505050"), + Balance: common.Big0, + }, + bb: { + // Contract code in YUL: + // + // object "ContractB" { + // code { + // let self := address() + // selfdestruct(self) + // } + // } + Code: common.FromHex("0x3080ff50"), + Balance: eth5, + }, + cc: { + Code: common.FromHex("0x3080ff50"), + Balance: eth1, + }, + dd: { + // Contract code in YUL: + // + // object "ContractD" { + // code { + // let C := 0x3333333333333333333333333333333333333333 + + // // Call to Contract C + // let resC := call(gas(), C, 0, 
0x0, 0x0, 0, 0) + + // // Revert + // revert(0, 0) + // } + // } + Code: common.FromHex("0x73333333333333333333333333333333333333333360006000600060006000855af160006000fd5050"), + Balance: eth2, + }, + }, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + + signer := types.LatestSigner(gspec.Config) + + testBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + txdata := &types.LegacyTx{ + Nonce: 0, + To: &aa, + Value: common.Big0, + Gas: 150000, + GasPrice: gwei5, + Data: []byte{}, + } + + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + output, chain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. A has 0 ether + // 2. B has 0 ether, burned + // 3. C has 2 ether, selfdestructed but parent D reverted + // 4. D has 1 ether, reverted + statedb, _ := chain.State() + if got, exp := statedb.GetBalance(aa), common.Big0; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), common.Big0; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + if got, exp := statedb.GetBalance(cc), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + if got, exp := statedb.GetBalance(dd), eth2; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + + // Check live trace output + block := chain.GetBlockByNumber(1) + + expected := supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(new(big.Int).Mul(block.BaseFee(), big.NewInt(int64(block.GasUsed())))), + Misc: (*hexutil.Big)(eth5), // 5ETH burned from contract B + }, + Number: 1, + Hash: block.Hash(), + ParentHash: block.ParentHash(), + } + + actual := output[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func 
testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockGen)) ([]supplyInfo, *core.BlockChain, error) { + var ( + engine = beacon.New(ethash.NewFaker()) + ) + + traceOutputPath := filepath.ToSlash(t.TempDir()) + traceOutputFilename := path.Join(traceOutputPath, "supply.jsonl") + + // Load supply tracer + tracer, err := tracers.LiveDirectory.New("supply", json.RawMessage(fmt.Sprintf(`{"path":"%s"}`, traceOutputPath))) + if err != nil { + return nil, nil, fmt.Errorf("failed to create call tracer: %v", err) + } + + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, engine, vm.Config{Tracer: tracer}, nil, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to create tester chain: %v", err) + } + defer chain.Stop() + + _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + gen(b) + }) + + if n, err := chain.InsertChain(blocks); err != nil { + return nil, chain, fmt.Errorf("block %d: failed to insert into chain: %v", n, err) + } + + // Check and compare the results + file, err := os.OpenFile(traceOutputFilename, os.O_RDONLY, 0666) + if err != nil { + return nil, chain, fmt.Errorf("failed to open output file: %v", err) + } + defer file.Close() + + var output []supplyInfo + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + blockBytes := scanner.Bytes() + + var info supplyInfo + if err := json.Unmarshal(blockBytes, &info); err != nil { + return nil, chain, fmt.Errorf("failed to unmarshal result: %v", err) + } + + output = append(output, info) + } + + return output, chain, nil +} + +func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) { + want, err := json.Marshal(expected) + if err != nil { + t.Fatalf("failed to marshal expected value to JSON: %v", err) + } + + have, err := json.Marshal(actual) + if err != nil { + t.Fatalf("failed to marshal actual value to 
JSON: %v", err) + } + + if !bytes.Equal(want, have) { + t.Fatalf("incorrect supply info: expected %s, got %s", string(want), string(have)) + } +} diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json index c46fe080f7f2..a9092bbcf02a 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json @@ -80,8 +80,9 @@ "from": "0x0047a8033cc6d6ca2ed5044674fd421f44884de8", "gas": "0x1b7740", "gasUsed": "0x9274f", + "to": "0xc24431c1a1147456414355b1f1769de450e524da", "input": "0x606060405260018054600160a060020a0319163317905561036f600360609081527f55524c0000000000000000000000000000000000000000000000000000000000608052610120604052604c60a09081527f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f707560c0527f626c69632f5469636b65723f706169723d455448584254292e726573756c742e60e0527f58455448585842542e632e3000000000000000000000000000000000000000006101005261037d919062030d417f38cc483100000000000000000000000000000000000000000000000000000000610120908152600090731d11e5eae3112dbd44f99266872ff1d07c77dce89081906338cc4831906101249060209060048188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc8887604051837c010000000000000000000000000000000000000000000000000000000002815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102255780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f11561000257505060405180517f3859283200000000000000000000000000000000000000000000000000
00000082526004828101888152606484018a90526080602485018181528d5160848701528d519496508a958e958e958e9594604484019360a40192909181908490829085908e906020601f850104600302600f01f150905090810190601f1680156102e65780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561033f5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151979650505050505050565b611af2806103806000396000f35b5056606060405236156100985760e060020a6000350463056e1059811461009a57806327dc297e14610391578063346b306a146103e257806341c0e1b51461075e578063489306eb146107855780635731f35714610a5e57806365a4dfb314610de05780637975c56e14611179578063a2e6204514611458578063ae152cf414611528578063b77644751461181b578063d594877014611876575b005b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561025e5780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a02815260040180858152602001806020018060200184815260200183810383528681815181526020019150805190602001908083838290600060
04602084601f0104600302600f01f150905090810190601f1680156103085780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103615780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f115610002575050604051519350610dd892505050565b60408051602060248035600481810135601f81018590048502860185019096528585526100989581359591946044949293909201918190840183828082843750949650505050505050611a2761187a565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156105d15780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106795780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106d2
5780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561072b5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b9392505050565b610098600154600160a060020a03908116339091161415611a255733600160a060020a0316ff5b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109345780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150600087876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109d75780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610a305780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f115610002575050604051519695505050505050565b60408051602060248035600481810135601f8101859004850286018501909652858552611955958135959194604494929390920191819084018382808284375050
6040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610c535780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610cfa5780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610d535780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610dac5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b949350505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760849791
96506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663fbf80418600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc89876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610fe45780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015089898989896040518760e060020a028152600401808681526020018060200180602001806020018581526020018481038452888181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110935780820380516001836020036101000a031916815260200191505b508481038352878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110ec5780820380516001836020036101000a031916815260200191505b508481038252868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156111455780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038185886185025a03f115610002575050604051519998505050505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a03
02191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561132e5780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f11561000257505050604051805190602001508787876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156113d05780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156114295780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6100985b611aef604060405190810160405280600381526020017f55524c0000000000000000000000000000000000000000000000000000000000815260200150608060405190810160405280604c81526020017f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f707581526020017f626c69632f5469636b65723f706169723d455448584254292e726573756c742e81526020017f58455448585842542e632e30000000000000000000000000000000000000000081526020015062030d416115ae565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f810183900483028401830190945283835297999860449892975091909101945090925082915084018382808284375094965050933593505050505b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663385928326000600090549061
01000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156116e75780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117925780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117eb5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6040805160028054602060018216156101000260001901909116829004601f81018290048202840182019094528383526119679390830182828015611a1d5780601f106119f257610100808354040283529160200191611a1d565b6119d55b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604080518051855473ffffffffffffffffffffffffffffffffffffffff1916178086557f4c7737950000000000000000000000000000000000000000000000000000000082529151600160a060020a03929092169250634c773795916004828101926020929190829003018188876161da5a03f115610002575050604051519250505090565b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156119c75780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311611a0057829003601f168201915b505050505081565b565b600160a060020a031633600160a060020a0316141515611a
4657610002565b8060026000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aad57805160ff19168380011785555b50611add9291505b80821115611ae75760008155600101611a99565b82800160010185558215611a91579182015b82811115611a91578251826000505591602001919060010190611abf565b5050611aeb61145c565b5090565b5050565b5056", - "error": "contract creation code storage out of gas", + "output": "0x606060405236156100985760e060020a6000350463056e1059811461009a57806327dc297e14610391578063346b306a146103e257806341c0e1b51461075e578063489306eb146107855780635731f35714610a5e57806365a4dfb314610de05780637975c56e14611179578063a2e6204514611458578063ae152cf414611528578063b77644751461181b578063d594877014611876575b005b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561025e5780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103085780820380516001836020036101000a031916815260200191505b5083810382528581
81518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103615780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f115610002575050604051519350610dd892505050565b60408051602060248035600481810135601f81018590048502860185019096528585526100989581359591946044949293909201918190840183828082843750949650505050505050611a2761187a565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156105d15780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106795780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106d25780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f
0104600302600f01f150905090810190601f16801561072b5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b9392505050565b610098600154600160a060020a03908116339091161415611a255733600160a060020a0316ff5b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109345780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150600087876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109d75780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610a305780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f115610002575050604051519695505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760649791965060249190910194509092508291508401838280828437505060
40805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610c535780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610cfa5780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610d535780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610dac5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b949350505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce890508060
0160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663fbf80418600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc89876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610fe45780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015089898989896040518760e060020a028152600401808681526020018060200180602001806020018581526020018481038452888181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110935780820380516001836020036101000a031916815260200191505b508481038352878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110ec5780820380516001836020036101000a031916815260200191505b508481038252868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156111455780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038185886185025a03f115610002575050604051519998505050505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a06002
0a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561132e5780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f11561000257505050604051805190602001508787876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156113d05780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156114295780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6100985b611aef604060405190810160405280600381526020017f55524c0000000000000000000000000000000000000000000000000000000000815260200150608060405190810160405280604c81526020017f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f707581526020017f626c69632f5469636b65723f706169723d455448584254292e726573756c742e81526020017f58455448585842542e632e30000000000000000000000000000000000000000081526020015062030d416115ae565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f810183900483028401830190945283835297999860449892975091909101945090925082915084018382808284375094965050933593505050505b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a0281526004018080602001838152602001828103825284818151
8152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156116e75780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117925780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117eb5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6040805160028054602060018216156101000260001901909116829004601f81018290048202840182019094528383526119679390830182828015611a1d5780601f106119f257610100808354040283529160200191611a1d565b6119d55b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604080518051855473ffffffffffffffffffffffffffffffffffffffff1916178086557f4c7737950000000000000000000000000000000000000000000000000000000082529151600160a060020a03929092169250634c773795916004828101926020929190829003018188876161da5a03f115610002575050604051519250505090565b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156119c75780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311611a0057829003601f168201915b505050505081565b565b600160a060020a031633600160a060020a0316141515611a4657610002565b8060026000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10
611aad57805160ff19168380011785555b50611add9291505b80821115611ae75760008155600101611a99565b82800160010185558215611a91579182015b82811115611a91578251826000505591602001919060010190611abf565b5050611aeb61145c565b5090565b5050565b5056", "calls": [ { "from": "0xc24431c1a1147456414355b1f1769de450e524da", diff --git a/eth/tracers/live/gen_supplyinfoburn.go b/eth/tracers/live/gen_supplyinfoburn.go new file mode 100644 index 000000000000..d01eda3975da --- /dev/null +++ b/eth/tracers/live/gen_supplyinfoburn.go @@ -0,0 +1,49 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package live + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*supplyInfoBurnMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s supplyInfoBurn) MarshalJSON() ([]byte, error) { + type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` + } + var enc supplyInfoBurn + enc.EIP1559 = (*hexutil.Big)(s.EIP1559) + enc.Blob = (*hexutil.Big)(s.Blob) + enc.Misc = (*hexutil.Big)(s.Misc) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *supplyInfoBurn) UnmarshalJSON(input []byte) error { + type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` + } + var dec supplyInfoBurn + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.EIP1559 != nil { + s.EIP1559 = (*big.Int)(dec.EIP1559) + } + if dec.Blob != nil { + s.Blob = (*big.Int)(dec.Blob) + } + if dec.Misc != nil { + s.Misc = (*big.Int)(dec.Misc) + } + return nil +} diff --git a/eth/tracers/live/gen_supplyinfoissuance.go b/eth/tracers/live/gen_supplyinfoissuance.go new file mode 100644 index 000000000000..e2536ee3252d --- /dev/null +++ b/eth/tracers/live/gen_supplyinfoissuance.go @@ -0,0 +1,49 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package live + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*supplyInfoIssuanceMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s supplyInfoIssuance) MarshalJSON() ([]byte, error) { + type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` + } + var enc supplyInfoIssuance + enc.GenesisAlloc = (*hexutil.Big)(s.GenesisAlloc) + enc.Reward = (*hexutil.Big)(s.Reward) + enc.Withdrawals = (*hexutil.Big)(s.Withdrawals) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *supplyInfoIssuance) UnmarshalJSON(input []byte) error { + type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` + } + var dec supplyInfoIssuance + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.GenesisAlloc != nil { + s.GenesisAlloc = (*big.Int)(dec.GenesisAlloc) + } + if dec.Reward != nil { + s.Reward = (*big.Int)(dec.Reward) + } + if dec.Withdrawals != nil { + s.Withdrawals = (*big.Int)(dec.Withdrawals) + } + return nil +} diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go new file mode 100644 index 000000000000..0c9141e99d9f --- /dev/null +++ b/eth/tracers/live/supply.go @@ -0,0 +1,310 @@ +package live + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/natefinch/lumberjack.v2" +) + +func init() { + tracers.LiveDirectory.Register("supply", newSupply) +} + +type supplyInfoIssuance struct { + GenesisAlloc *big.Int `json:"genesisAlloc,omitempty"` + Reward *big.Int `json:"reward,omitempty"` + Withdrawals *big.Int `json:"withdrawals,omitempty"` +} + +//go:generate go run github.com/fjl/gencodec -type supplyInfoIssuance -field-override supplyInfoIssuanceMarshaling -out gen_supplyinfoissuance.go +type supplyInfoIssuanceMarshaling struct { + GenesisAlloc *hexutil.Big + Reward *hexutil.Big + Withdrawals *hexutil.Big +} + +type supplyInfoBurn struct { + EIP1559 *big.Int `json:"1559,omitempty"` + Blob *big.Int `json:"blob,omitempty"` + Misc *big.Int 
`json:"misc,omitempty"` +} + +//go:generate go run github.com/fjl/gencodec -type supplyInfoBurn -field-override supplyInfoBurnMarshaling -out gen_supplyinfoburn.go +type supplyInfoBurnMarshaling struct { + EIP1559 *hexutil.Big + Blob *hexutil.Big + Misc *hexutil.Big +} + +type supplyInfo struct { + Issuance *supplyInfoIssuance `json:"issuance,omitempty"` + Burn *supplyInfoBurn `json:"burn,omitempty"` + + // Block info + Number uint64 `json:"blockNumber"` + Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash"` +} + +type supplyTxCallstack struct { + calls []supplyTxCallstack + burn *big.Int +} + +type supply struct { + delta supplyInfo + txCallstack []supplyTxCallstack // Callstack for current transaction + logger *lumberjack.Logger +} + +type supplyTracerConfig struct { + Path string `json:"path"` // Path to the directory where the tracer logs will be stored + MaxSize int `json:"maxSize"` // MaxSize is the maximum size in megabytes of the tracer log file before it gets rotated. It defaults to 100 megabytes. 
+} + +func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) { + var config supplyTracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, fmt.Errorf("failed to parse config: %v", err) + } + } + if config.Path == "" { + return nil, errors.New("supply tracer output path is required") + } + + // Store traces in a rotating file + logger := &lumberjack.Logger{ + Filename: filepath.Join(config.Path, "supply.jsonl"), + } + if config.MaxSize > 0 { + logger.MaxSize = config.MaxSize + } + + t := &supply{ + delta: newSupplyInfo(), + logger: logger, + } + return &tracing.Hooks{ + OnBlockStart: t.OnBlockStart, + OnBlockEnd: t.OnBlockEnd, + OnGenesisBlock: t.OnGenesisBlock, + OnTxStart: t.OnTxStart, + OnBalanceChange: t.OnBalanceChange, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnClose: t.OnClose, + }, nil +} + +func newSupplyInfo() supplyInfo { + return supplyInfo{ + Issuance: &supplyInfoIssuance{ + GenesisAlloc: big.NewInt(0), + Reward: big.NewInt(0), + Withdrawals: big.NewInt(0), + }, + Burn: &supplyInfoBurn{ + EIP1559: big.NewInt(0), + Blob: big.NewInt(0), + Misc: big.NewInt(0), + }, + + Number: 0, + Hash: common.Hash{}, + ParentHash: common.Hash{}, + } +} + +func (s *supply) resetDelta() { + s.delta = newSupplyInfo() +} + +func (s *supply) OnBlockStart(ev tracing.BlockEvent) { + s.resetDelta() + + s.delta.Number = ev.Block.NumberU64() + s.delta.Hash = ev.Block.Hash() + s.delta.ParentHash = ev.Block.ParentHash() + + // Calculate Burn for this block + if ev.Block.BaseFee() != nil { + burn := new(big.Int).Mul(new(big.Int).SetUint64(ev.Block.GasUsed()), ev.Block.BaseFee()) + s.delta.Burn.EIP1559 = burn + } + // Blob burnt gas + if blobGas := ev.Block.BlobGasUsed(); blobGas != nil && *blobGas > 0 && ev.Block.ExcessBlobGas() != nil { + var ( + excess = *ev.Block.ExcessBlobGas() + baseFee = eip4844.CalcBlobFee(excess) + burn = new(big.Int).Mul(new(big.Int).SetUint64(*blobGas), baseFee) + ) + s.delta.Burn.Blob = burn + } +} + +func 
(s *supply) OnBlockEnd(err error) { + s.write(s.delta) +} + +func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) { + s.resetDelta() + + s.delta.Number = b.NumberU64() + s.delta.Hash = b.Hash() + s.delta.ParentHash = b.ParentHash() + + // Initialize supply with total allocation in genesis block + for _, account := range alloc { + s.delta.Issuance.GenesisAlloc.Add(s.delta.Issuance.GenesisAlloc, account.Balance) + } + + s.write(s.delta) +} + +func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) { + diff := new(big.Int).Sub(newBalance, prevBalance) + + // NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock + switch reason { + case tracing.BalanceIncreaseRewardMineUncle: + case tracing.BalanceIncreaseRewardMineBlock: + s.delta.Issuance.Reward.Add(s.delta.Issuance.Reward, diff) + case tracing.BalanceIncreaseWithdrawal: + s.delta.Issuance.Withdrawals.Add(s.delta.Issuance.Withdrawals, diff) + case tracing.BalanceDecreaseSelfdestructBurn: + // BalanceDecreaseSelfdestructBurn is non-reversible as it happens + // at the end of the transaction. 
+ s.delta.Burn.Misc.Sub(s.delta.Burn.Misc, diff) + default: + return + } +} + +func (s *supply) OnTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) { + s.txCallstack = make([]supplyTxCallstack, 0, 1) +} + +// internalTxsHandler handles internal transactions burned amount +func (s *supply) internalTxsHandler(call *supplyTxCallstack) { + // Handle Burned amount + if call.burn != nil { + s.delta.Burn.Misc.Add(s.delta.Burn.Misc, call.burn) + } + + if len(call.calls) > 0 { + // Recursivelly handle internal calls + for _, call := range call.calls { + callCopy := call + s.internalTxsHandler(&callCopy) + } + } +} + +func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + call := supplyTxCallstack{ + calls: make([]supplyTxCallstack, 0), + } + + // This is a special case of burned amount which has to be handled here + // which happens when type == selfdestruct and from == to. + if vm.OpCode(typ) == vm.SELFDESTRUCT && from == to && value.Cmp(common.Big0) == 1 { + call.burn = value + } + + // Append call to the callstack, so we can fill the details in CaptureExit + s.txCallstack = append(s.txCallstack, call) +} + +func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth == 0 { + // No need to handle Burned amount if transaction is reverted + if !reverted { + s.internalTxsHandler(&s.txCallstack[0]) + } + return + } + + size := len(s.txCallstack) + if size <= 1 { + return + } + // Pop call + call := s.txCallstack[size-1] + s.txCallstack = s.txCallstack[:size-1] + size -= 1 + + // In case of a revert, we can drop the call and all its subcalls. + // Caution, that this has to happen after popping the call from the stack. 
+ if reverted { + return + } + s.txCallstack[size-1].calls = append(s.txCallstack[size-1].calls, call) +} + +func (s *supply) OnClose() { + if err := s.logger.Close(); err != nil { + log.Warn("failed to close supply tracer log file", "error", err) + } +} + +func (s *supply) write(data any) { + supply, ok := data.(supplyInfo) + if !ok { + log.Warn("failed to cast supply tracer data on write to log file") + return + } + + // Remove empty fields + if supply.Issuance.GenesisAlloc.Sign() == 0 { + supply.Issuance.GenesisAlloc = nil + } + + if supply.Issuance.Reward.Sign() == 0 { + supply.Issuance.Reward = nil + } + + if supply.Issuance.Withdrawals.Sign() == 0 { + supply.Issuance.Withdrawals = nil + } + + if supply.Issuance.GenesisAlloc == nil && supply.Issuance.Reward == nil && supply.Issuance.Withdrawals == nil { + supply.Issuance = nil + } + + if supply.Burn.EIP1559.Sign() == 0 { + supply.Burn.EIP1559 = nil + } + + if supply.Burn.Blob.Sign() == 0 { + supply.Burn.Blob = nil + } + + if supply.Burn.Misc.Sign() == 0 { + supply.Burn.Misc = nil + } + + if supply.Burn.EIP1559 == nil && supply.Burn.Blob == nil && supply.Burn.Misc == nil { + supply.Burn = nil + } + + out, _ := json.Marshal(supply) + if _, err := s.logger.Write(out); err != nil { + log.Warn("failed to write to supply tracer log file", "error", err) + } + if _, err := s.logger.Write([]byte{'\n'}); err != nil { + log.Warn("failed to write to supply tracer log file", "error", err) + } +} diff --git a/eth/tracers/logger/logger_json.go b/eth/tracers/logger/logger_json.go index d66b8c4b8ad0..797f7ac65821 100644 --- a/eth/tracers/logger/logger_json.go +++ b/eth/tracers/logger/logger_json.go @@ -58,6 +58,7 @@ type jsonLogger struct { encoder *json.Encoder cfg *Config env *tracing.VMContext + hooks *tracing.Hooks } // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects @@ -67,12 +68,14 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks { if l.cfg == nil { l.cfg = &Config{} 
} - return &tracing.Hooks{ - OnTxStart: l.OnTxStart, - OnExit: l.OnExit, - OnOpcode: l.OnOpcode, - OnFault: l.OnFault, + l.hooks = &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnSystemCallStart: l.onSystemCallStart, + OnExit: l.OnEnd, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, } + return l.hooks } // NewJSONLoggerWithCallFrames creates a new EVM tracer that prints execution steps as JSON objects @@ -82,13 +85,15 @@ func NewJSONLoggerWithCallFrames(cfg *Config, writer io.Writer) *tracing.Hooks { if l.cfg == nil { l.cfg = &Config{} } - return &tracing.Hooks{ - OnTxStart: l.OnTxStart, - OnEnter: l.OnEnter, - OnExit: l.OnExit, - OnOpcode: l.OnOpcode, - OnFault: l.OnFault, + l.hooks = &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnSystemCallStart: l.onSystemCallStart, + OnEnter: l.OnEnter, + OnExit: l.OnExit, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, } + return l.hooks } func (l *jsonLogger) OnFault(pc uint64, op byte, gas uint64, cost uint64, scope tracing.OpContext, depth int, err error) { @@ -122,6 +127,16 @@ func (l *jsonLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracin l.encoder.Encode(log) } +func (l *jsonLogger) onSystemCallStart() { + // Process no events while in system call. + hooks := *l.hooks + *l.hooks = tracing.Hooks{ + OnSystemCallEnd: func() { + *l.hooks = hooks + }, + } +} + // OnEnter is not enabled by default. func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { frame := callFrame{ diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 3b6350658038..2b84ecaf4057 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -74,6 +74,11 @@ func (f callFrame) failed() bool { func (f *callFrame) processOutput(output []byte, err error, reverted bool) { output = common.CopyBytes(output) + // Clear error if tx wasn't reverted. This happened + // for pre-homestead contract storage OOG. 
+ if err != nil && !reverted { + err = nil + } if err == nil { f.Output = output return diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index f8d38ddd2d5b..ce0fb081143e 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -23,6 +23,7 @@ import ( "math/big" "slices" "strings" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -114,7 +115,7 @@ type flatCallTracer struct { tracer *callTracer config flatCallTracerConfig ctx *tracers.Context // Holds tracer context data - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption activePrecompiles []common.Address // Updated on tx start based on given rules } @@ -154,6 +155,9 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trac // OnEnter is called when EVM enters a new scope (via call, create or selfdestruct). func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if t.interrupt.Load() { + return + } t.tracer.OnEnter(depth, typ, from, to, input, gas, value) if depth == 0 { @@ -169,6 +173,9 @@ func (t *flatCallTracer) OnEnter(depth int, typ byte, from common.Address, to co // OnExit is called when EVM exits a scope, even if the scope didn't // execute any code. 
func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if t.interrupt.Load() { + return + } t.tracer.OnExit(depth, output, gasUsed, err, reverted) if depth == 0 { @@ -194,6 +201,9 @@ func (t *flatCallTracer) OnExit(depth int, output []byte, gasUsed uint64, err er } func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + if t.interrupt.Load() { + return + } t.tracer.OnTxStart(env, tx, from) // Update list of precompiles based on current block rules := env.ChainConfig.Rules(env.BlockNumber, env.Random != nil, env.Time) @@ -201,6 +211,9 @@ func (t *flatCallTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction } func (t *flatCallTracer) OnTxEnd(receipt *types.Receipt, err error) { + if t.interrupt.Load() { + return + } t.tracer.OnTxEnd(receipt, err) } @@ -219,12 +232,13 @@ func (t *flatCallTracer) GetResult() (json.RawMessage, error) { if err != nil { return nil, err } - return res, t.reason + return res, t.tracer.reason } // Stop terminates execution of the tracer at the first opportune moment. func (t *flatCallTracer) Stop(err error) { t.tracer.Stop(err) + t.interrupt.Store(true) } // isPrecompiled returns whether the addr is a precompile. diff --git a/eth/tracers/native/call_flat_test.go b/eth/tracers/native/call_flat_test.go new file mode 100644 index 000000000000..d5481b868bcc --- /dev/null +++ b/eth/tracers/native/call_flat_test.go @@ -0,0 +1,64 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package native_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestCallFlatStop(t *testing.T) { + tracer, err := tracers.DefaultDirectory.New("flatCallTracer", &tracers.Context{}, nil) + require.NoError(t, err) + + // this error should be returned by GetResult + stopError := errors.New("stop error") + + // simulate a transaction + tx := types.NewTx(&types.LegacyTx{ + Nonce: 0, + To: &common.Address{}, + Value: big.NewInt(0), + Gas: 0, + GasPrice: big.NewInt(0), + Data: nil, + }) + + tracer.OnTxStart(&tracing.VMContext{ + ChainConfig: params.MainnetChainConfig, + }, tx, common.Address{}) + + tracer.OnEnter(0, byte(vm.CALL), common.Address{}, common.Address{}, nil, 0, big.NewInt(0)) + + // stop before the transaction is finished + tracer.Stop(stopError) + + tracer.OnTxEnd(&types.Receipt{GasUsed: 0}, nil) + + // check that the error is returned by GetResult + _, tracerError := tracer.GetResult() + require.Equal(t, stopError, tracerError) +} diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 5c3cb79dd65c..390f08567714 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -191,7 +191,12 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface } txs[i] = tx.tx } - return 
types.NewBlockWithHeader(head).WithBody(txs, uncles).WithWithdrawals(body.Withdrawals), nil + return types.NewBlockWithHeader(head).WithBody( + types.Body{ + Transactions: txs, + Uncles: uncles, + Withdrawals: body.Withdrawals, + }), nil } // HeaderByHash returns the block header with the given hash. diff --git a/ethdb/database.go b/ethdb/database.go index 4d4817daf2e5..3ec1f70e3b60 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -88,8 +88,8 @@ type AncientReaderOp interface { // Ancients returns the ancient item numbers in the ancient store. Ancients() (uint64, error) - // Tail returns the number of first stored item in the freezer. - // This number can also be interpreted as the total deleted item numbers. + // Tail returns the number of first stored item in the ancient store. + // This number can also be interpreted as the total deleted items. Tail() (uint64, error) // AncientSize returns the ancient size of the specified category. @@ -101,7 +101,7 @@ type AncientReader interface { AncientReaderOp // ReadAncients runs the given read operation while ensuring that no writes take place - // on the underlying freezer. + // on the underlying ancient store. ReadAncients(fn func(AncientReaderOp) error) (err error) } @@ -141,11 +141,15 @@ type AncientWriteOp interface { AppendRaw(kind string, number uint64, item []byte) error } -// AncientStater wraps the Stat method of a backing data store. +// AncientStater wraps the Stat method of a backing ancient store. type AncientStater interface { - // AncientDatadir returns the path of root ancient directory. Empty string - // will be returned if ancient store is not enabled at all. The returned - // path can be used to construct the path of other freezers. + // AncientDatadir returns the path of the ancient store directory. + // + // If the ancient store is not activated, an error is returned. + // If an ephemeral ancient store is used, an empty path is returned. 
+ // + // The path returned by AncientDatadir can be used as the root path + // of the ancient store to construct paths for other sub ancient stores. AncientDatadir() (string, error) } @@ -171,15 +175,23 @@ type Stater interface { } // AncientStore contains all the methods required to allow handling different -// ancient data stores backing immutable chain data store. +// ancient data stores backing immutable data store. type AncientStore interface { AncientReader AncientWriter io.Closer } +// ResettableAncientStore extends the AncientStore interface by adding a Reset method. +type ResettableAncientStore interface { + AncientStore + + // Reset is designed to reset the entire ancient store to its default state. + Reset() error +} + // Database contains all the methods required by the high level database to not -// only access the key-value data store but also the chain freezer. +// only access the key-value data store but also the ancient chain store. type Database interface { Reader Writer diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go index 83a13c8cff64..29a773ced407 100644 --- a/ethdb/dbtest/testsuite.go +++ b/ethdb/dbtest/testsuite.go @@ -381,7 +381,7 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) { } }) - t.Run("OperatonsAfterClose", func(t *testing.T) { + t.Run("OperationsAfterClose", func(t *testing.T) { db := New() db.Put([]byte("key"), []byte("value")) db.Close() @@ -530,7 +530,7 @@ func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) { vals = append(vals, randBytes(vsize)) } if order { - slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(a, b) }) + slices.SortFunc(keys, bytes.Compare) } return keys, vals } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index e58efbddbe80..64f51cf21701 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -400,7 +400,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a 
key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.b.Delete(key) b.size += len(key) diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index 2a939f9a1850..d1233acb2198 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -227,7 +227,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.writes = append(b.writes, keyvalue{string(key), nil, true}) b.size += len(key) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 01bfb4be3d1d..0fac07c9604e 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -207,7 +207,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e // The default compaction concurrency(1 thread), // Here use all available CPUs for faster compaction. - MaxConcurrentCompactions: func() int { return runtime.NumCPU() }, + MaxConcurrentCompactions: runtime.NumCPU, // Per-level options. Options for at least one level must be specified. The // options for the last level are used for all subsequent levels. 
@@ -240,19 +240,19 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e } db.db = innerDB - db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil) - db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil) - db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil) - db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil) - db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil) - db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil) - db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil) - db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil) - db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil) - db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) - db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) - db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) - db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) + db.compTimeMeter = metrics.GetOrRegisterMeter(namespace+"compact/time", nil) + db.compReadMeter = metrics.GetOrRegisterMeter(namespace+"compact/input", nil) + db.compWriteMeter = metrics.GetOrRegisterMeter(namespace+"compact/output", nil) + db.diskSizeGauge = metrics.GetOrRegisterGauge(namespace+"disk/size", nil) + db.diskReadMeter = metrics.GetOrRegisterMeter(namespace+"disk/read", nil) + db.diskWriteMeter = metrics.GetOrRegisterMeter(namespace+"disk/write", nil) + db.writeDelayMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/duration", nil) + db.writeDelayNMeter = metrics.GetOrRegisterMeter(namespace+"compact/writedelay/counter", nil) + db.memCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/memory", nil) + db.level0CompGauge = 
metrics.GetOrRegisterGauge(namespace+"compact/level0", nil) + db.nonlevel0CompGauge = metrics.GetOrRegisterGauge(namespace+"compact/nonlevel0", nil) + db.seekCompGauge = metrics.GetOrRegisterGauge(namespace+"compact/seek", nil) + db.manualMemAllocGauge = metrics.GetOrRegisterGauge(namespace+"memory/manualalloc", nil) // Start up the metrics gathering and return go db.meter(metricsGatheringInterval, namespace) @@ -543,7 +543,7 @@ func (d *Database) meter(refresh time.Duration, namespace string) { for i, level := range stats.Levels { // Append metrics for additional layers if i >= len(d.levelsGauge) { - d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + d.levelsGauge = append(d.levelsGauge, metrics.GetOrRegisterGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) } d.levelsGauge[i].Update(level.NumFiles) } @@ -575,7 +575,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.b.Delete(key, nil) b.size += len(key) diff --git a/event/multisub.go b/event/multisub.go index 5c8d2df48cc4..1f0af2a29249 100644 --- a/event/multisub.go +++ b/event/multisub.go @@ -17,7 +17,7 @@ package event // JoinSubscriptions joins multiple subscriptions to be able to track them as -// one entity and collectively cancel them of consume any errors from them. +// one entity and collectively cancel them or consume any errors from them. 
func JoinSubscriptions(subs ...Subscription) Subscription { return NewSubscription(func(unsubbed <-chan struct{}) error { // Unsubscribe all subscriptions before returning diff --git a/go.mod b/go.mod index 3cd0d82bdf7c..8968140fbf19 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.21 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 - github.com/Microsoft/go-winio v0.6.1 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/Microsoft/go-winio v0.6.2 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 @@ -15,20 +15,20 @@ require ( github.com/cloudflare/cloudflare-go v0.79.0 github.com/cockroachdb/pebble v1.1.0 github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 + github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c github.com/crate-crypto/go-kzg-4844 v1.0.0 github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set/v2 v2.1.0 + github.com/deckarep/golang-set/v2 v2.6.0 github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 github.com/ethereum/c-kzg-4844 v1.0.0 - github.com/fatih/color v1.13.0 + github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 + github.com/fatih/color v1.16.0 github.com/ferranbt/fastssz v0.1.2 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/fjl/memsize v0.0.2 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/protobuf v1.5.4 @@ -51,7 +51,7 @@ require ( github.com/kilic/bls12-381 v0.1.0 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.17 + 
github.com/mattn/go-isatty v0.0.20 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 @@ -69,11 +69,11 @@ require ( go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.22.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.20.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.20.0 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -93,7 +93,7 @@ require ( github.com/aws/smithy-go v1.15.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.5 // indirect diff --git a/go.sum b/go.sum index a7b4eb1c138a..5e4b38e9a6bc 100644 --- a/go.sum +++ b/go.sum @@ -44,17 +44,15 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgx github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= 
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -103,8 +101,9 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -133,8 +132,8 @@ github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJ github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -142,8 +141,8 @@ github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= -github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= 
+github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= @@ -169,8 +168,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk= github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= @@ -185,8 +186,6 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgx github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite 
v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -377,16 +376,14 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= @@ -682,7 +679,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -690,11 +686,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -851,8 +848,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/era/era.go b/internal/era/era.go index 2b9e6229018a..6ad7339b36a0 100644 --- a/internal/era/era.go +++ b/internal/era/era.go @@ -151,7 +151,7 @@ func (e *Era) GetBlockByNumber(num 
uint64) (*types.Block, error) { if err := rlp.Decode(r, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Accumulator reads the accumulator entry in the Era1 file. diff --git a/internal/era/iterator.go b/internal/era/iterator.go index cc4f27c20190..f48aab46b4ec 100644 --- a/internal/era/iterator.go +++ b/internal/era/iterator.go @@ -73,7 +73,7 @@ func (it *Iterator) Block() (*types.Block, error) { if err := rlp.Decode(it.inner.Body, &body); err != nil { return nil, err } - return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil + return types.NewBlockWithHeader(&header).WithBody(body), nil } // Receipts returns the receipts for the iterator's current position. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 4b5145f5deea..8866a5b8706d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -966,7 +966,7 @@ func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc. // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is // set, message execution will only use the data in the given state. Otherwise -// if statDiff is set, all diff will be applied first and then execute the call +// if stateDiff is set, all diff will be applied first and then execute the call // message. 
type OverrideAccount struct { Nonce *hexutil.Uint64 `json:"nonce"` @@ -1203,7 +1203,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr return 0, err } call := args.ToMessage(header.BaseFee) - // Run the gas estimation andwrap any revertals into a custom return + // Run the gas estimation and wrap any revertals into a custom return estimate, revert, err := gasestimator.Estimate(ctx, call, opts, gasCap) if err != nil { if len(revert) > 0 { @@ -1524,6 +1524,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH prevTracer = logger.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles) } for { + if err := ctx.Err(); err != nil { + return nil, 0, nil, err + } // Retrieve the current access list to expand accessList := prevTracer.AccessList() log.Trace("Creating access list", "input", accessList) diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 1f62d0c6bde3..cf5160caf778 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -751,7 +751,7 @@ func TestEstimateGas(t *testing.T) { From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), }, want: 21000, @@ -939,7 +939,7 @@ func TestCall(t *testing.T) { call: TransactionArgs{ From: &accounts[1].addr, To: &randomAccounts[2].addr, - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), }, overrides: StateOverride{ @@ -1063,7 +1063,7 @@ func TestSendBlobTransaction(t *testing.T) { From: &b.acc.Address, To: &to, Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, }) if err != nil { t.Fatalf("failed to fill tx defaults: %v\n", err) @@ -1348,7 +1348,7 @@ func 
TestRPCMarshalBlock(t *testing.T) { } txs = append(txs, tx) } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, &types.Body{Transactions: txs}, nil, blocktest.NewHasher()) var testSuite = []struct { inclTx bool @@ -1559,7 +1559,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) { Address: common.Address{0x12, 0x34}, Amount: 10, } - pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher()) + pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, &types.Body{Transactions: types.Transactions{tx}, Withdrawals: types.Withdrawals{withdrawal}}, nil, blocktest.NewHasher()) ) backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] diff --git a/internal/testlog/testlog.go b/internal/testlog/testlog.go index 3740dd1f242c..ad61af9eac20 100644 --- a/internal/testlog/testlog.go +++ b/internal/testlog/testlog.go @@ -58,7 +58,7 @@ func (h *bufHandler) Handle(_ context.Context, r slog.Record) error { } func (h *bufHandler) Enabled(_ context.Context, lvl slog.Level) bool { - return lvl <= h.level + return lvl >= h.level } func (h *bufHandler) WithAttrs(attrs []slog.Attr) slog.Handler { diff --git a/log/logger_test.go b/log/logger_test.go index 2ea08585475d..f1a9a93bce7a 100644 --- a/log/logger_test.go +++ b/log/logger_test.go @@ -26,7 +26,7 @@ func TestLoggingWithVmodule(t *testing.T) { logger.Trace("a message", "foo", "bar") have := out.String() // The timestamp is locale-dependent, so we want to trim that off - // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..." + // "INFO [01-01|00:00:00.000] a message ..." -> "a message..." 
have = strings.Split(have, "]")[1] want := " a message foo=bar\n" if have != want { @@ -42,7 +42,7 @@ func TestTerminalHandlerWithAttrs(t *testing.T) { logger.Trace("a message", "foo", "bar") have := out.String() // The timestamp is locale-dependent, so we want to trim that off - // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..." + // "INFO [01-01|00:00:00.000] a message ..." -> "a message..." have = strings.Split(have, "]")[1] want := " a message baz=bat foo=bar\n" if have != want { @@ -97,7 +97,7 @@ func benchmarkLogger(b *testing.B, l Logger) { tt = time.Now() bigint = big.NewInt(100) nilbig *big.Int - err = errors.New("Oh nooes it's crap") + err = errors.New("oh nooes it's crap") ) b.ReportAllocs() b.ResetTimer() @@ -126,7 +126,7 @@ func TestLoggerOutput(t *testing.T) { tt = time.Time{} bigint = big.NewInt(100) nilbig *big.Int - err = errors.New("Oh nooes it's crap") + err = errors.New("oh nooes it's crap") smallUint = uint256.NewInt(500_000) bigUint = &uint256.Int{0xff, 0xff, 0xff, 0xff} ) @@ -150,7 +150,7 @@ func TestLoggerOutput(t *testing.T) { have := out.String() t.Logf("output %v", out.String()) - want := `INFO [11-07|19:14:33.821] This is a message foo=123 bytes="[0 0 0 0 0 0 0 0 0 0]" bonk="a string with text" time=0001-01-01T00:00:00+0000 bigint=100 nilbig= err="Oh nooes it's crap" struct="{A:Foo B:12}" struct="{A:Foo\nLinebreak B:122}" ptrstruct="&{A:Foo B:12}" smalluint=500,000 bigUint=1,600,660,942,523,603,594,864,898,306,482,794,244,293,965,082,972,225,630,372,095 + want := `INFO [11-07|19:14:33.821] This is a message foo=123 bytes="[0 0 0 0 0 0 0 0 0 0]" bonk="a string with text" time=0001-01-01T00:00:00+0000 bigint=100 nilbig= err="oh nooes it's crap" struct="{A:Foo B:12}" struct="{A:Foo\nLinebreak B:122}" ptrstruct="&{A:Foo B:12}" smalluint=500,000 bigUint=1,600,660,942,523,603,594,864,898,306,482,794,244,293,965,082,972,225,630,372,095 ` if !bytes.Equal([]byte(have)[25:], []byte(want)[25:]) { t.Errorf("Error\nhave: %q\nwant: %q", 
have, want) diff --git a/metrics/debug.go b/metrics/debug.go index de4a2739fe08..9dfee1a86698 100644 --- a/metrics/debug.go +++ b/metrics/debug.go @@ -19,18 +19,18 @@ var ( gcStats debug.GCStats ) -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. +// CaptureDebugGCStats captures new values for the Go garbage collector statistics +// exported in debug.GCStats. This is designed to be called as a goroutine. func CaptureDebugGCStats(r Registry, d time.Duration) { for range time.Tick(d) { CaptureDebugGCStatsOnce(r) } } -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. +// CaptureDebugGCStatsOnce captures new values for the Go garbage collector +// statistics exported in debug.GCStats. This is designed to be called in +// a background goroutine. Giving a registry which has not been given to +// RegisterDebugGCStats will panic. // // Be careful (but much less so) with this because debug.ReadGCStats calls // the C function runtime·lock(runtime·mheap) which, while not a stop-the-world @@ -50,9 +50,9 @@ func CaptureDebugGCStatsOnce(r Registry) { debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) } -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. +// RegisterDebugGCStats registers metrics for the Go garbage collector statistics +// exported in debug.GCStats. The metrics are named by their fully-qualified Go +// symbols, i.e. debug.GCStats.PauseTotal. 
func RegisterDebugGCStats(r Registry) { debugMetrics.GCStats.LastGC = NewGauge() debugMetrics.GCStats.NumGC = NewGauge() diff --git a/metrics/sample_test.go b/metrics/sample_test.go index 9835ec1c3003..4227b43ef775 100644 --- a/metrics/sample_test.go +++ b/metrics/sample_test.go @@ -103,18 +103,18 @@ func TestExpDecaySample(t *testing.T) { } snap := sample.Snapshot() if have, want := int(snap.Count()), tc.updates; have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected count: have %d want %d", have, want) } if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected size: have %d want %d", have, want) } values := snap.(*sampleSnapshot).values if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected values length: have %d want %d", have, want) } for _, v := range values { if v > int64(tc.updates) || v < 0 { - t.Errorf("out of range [0, %d): %v", tc.updates, v) + t.Errorf("out of range [0, %d]: %v", tc.updates, v) } } } @@ -125,12 +125,12 @@ func TestExpDecaySample(t *testing.T) { // The priority becomes +Inf quickly after starting if this is done, // effectively freezing the set of samples until a rescale step happens. 
func TestExpDecaySampleNanosecondRegression(t *testing.T) { - sw := NewExpDecaySample(100, 0.99) - for i := 0; i < 100; i++ { + sw := NewExpDecaySample(1000, 0.99) + for i := 0; i < 1000; i++ { sw.Update(10) } time.Sleep(1 * time.Millisecond) - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { sw.Update(20) } s := sw.Snapshot() @@ -195,7 +195,7 @@ func TestUniformSample(t *testing.T) { } for _, v := range values { if v > 1000 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) + t.Errorf("out of range [0, 1000]: %v\n", v) } } } @@ -251,6 +251,9 @@ func benchmarkSample(b *testing.B, s Sample) { } func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) { + if sum := s.Sum(); sum != 496598 { + t.Errorf("s.Sum(): 496598 != %v\n", sum) + } if count := s.Count(); count != 10000 { t.Errorf("s.Count(): 10000 != %v\n", count) } diff --git a/miner/miner.go b/miner/miner.go index 430efcb2fcf1..ff81d0e8f5c8 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -53,7 +53,7 @@ type Config struct { // DefaultConfig contains default settings for miner. 
var DefaultConfig = Config{ GasCeil: 30_000_000, - GasPrice: big.NewInt(params.GWei), + GasPrice: big.NewInt(params.GWei / 1000), // The default recommit time is chosen as two seconds since // consensus-layer usually will wait a half slot of time(6s) diff --git a/miner/miner_test.go b/miner/miner_test.go index 7c39564240c1..da133ad8d0b6 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -78,7 +78,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return types.NewBlock(bc.CurrentBlock(), nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 1728b9e5bd59..ac9b2ab704ba 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -141,7 +141,7 @@ func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*Miner, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) - backend.txPool.Add(pendingTxs, true, false) + backend.txPool.Add(pendingTxs, true, true) w := New(backend, testConfig, engine) return w, backend } diff --git a/node/api.go b/node/api.go index a71ae6aa2954..33dfb3a1cc4d 100644 --- a/node/api.go +++ b/node/api.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rpc" ) @@ -39,6 +40,9 @@ func (n *Node) apis() []rpc.API { }, { Namespace: "debug", Service: debug.Handler, + }, { + Namespace: "debug", + Service: 
&p2pDebugAPI{n}, }, { Namespace: "web3", Service: &web3API{n}, @@ -333,3 +337,16 @@ func (s *web3API) ClientVersion() string { func (s *web3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } + +// p2pDebugAPI provides access to p2p internals for debugging. +type p2pDebugAPI struct { + stack *Node +} + +func (s *p2pDebugAPI) DiscoveryV4Table() [][]discover.BucketNode { + disc := s.stack.server.DiscoveryV4() + if disc != nil { + return disc.TableBuckets() + } + return nil +} diff --git a/node/node.go b/node/node.go index 6cbae68591eb..633f88f058a1 100644 --- a/node/node.go +++ b/node/node.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" @@ -752,7 +753,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient var db ethdb.Database var err error if n.config.DataDir == "" { - db = rawdb.NewMemoryDatabase() + db, err = rawdb.NewDatabaseWithFreezer(memorydb.New(), "", namespace, readonly) } else { db, err = rawdb.Open(rawdb.OpenOptions{ Type: n.config.DBEngine, diff --git a/p2p/discover/common.go b/p2p/discover/common.go index 1f763904bb1f..0716f7472fce 100644 --- a/p2p/discover/common.go +++ b/p2p/discover/common.go @@ -18,7 +18,12 @@ package discover import ( "crypto/ecdsa" + crand "crypto/rand" + "encoding/binary" + "math/rand" "net" + "net/netip" + "sync" "time" "github.com/ethereum/go-ethereum/common/mclock" @@ -30,8 +35,8 @@ import ( // UDPConn is a network connection on which discovery can operate. 
type UDPConn interface { - ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) - WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) + ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) + WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (n int, err error) Close() error LocalAddr() net.Addr } @@ -62,7 +67,7 @@ type Config struct { func (cfg Config) withDefaults() Config { // Node table configuration: if cfg.PingInterval == 0 { - cfg.PingInterval = 10 * time.Second + cfg.PingInterval = 3 * time.Second } if cfg.RefreshInterval == 0 { cfg.RefreshInterval = 30 * time.Minute @@ -90,5 +95,46 @@ func ListenUDP(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { // channel if configured. type ReadPacket struct { Data []byte - Addr *net.UDPAddr + Addr netip.AddrPort +} + +type randomSource interface { + Intn(int) int + Int63n(int64) int64 + Shuffle(int, func(int, int)) +} + +// reseedingRandom is a random number generator that tracks when it was last re-seeded. +type reseedingRandom struct { + mu sync.Mutex + cur *rand.Rand +} + +func (r *reseedingRandom) seed() { + var b [8]byte + crand.Read(b[:]) + seed := binary.BigEndian.Uint64(b[:]) + new := rand.New(rand.NewSource(int64(seed))) + + r.mu.Lock() + r.cur = new + r.mu.Unlock() +} + +func (r *reseedingRandom) Intn(n int) int { + r.mu.Lock() + defer r.mu.Unlock() + return r.cur.Intn(n) +} + +func (r *reseedingRandom) Int63n(n int64) int64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.cur.Int63n(n) +} + +func (r *reseedingRandom) Shuffle(n int, swap func(i, j int)) { + r.mu.Lock() + defer r.mu.Unlock() + r.cur.Shuffle(n, swap) } diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index b8d97b44e1cc..09808b71e079 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -29,16 +29,16 @@ import ( // not need to be an actual node identifier. 
type lookup struct { tab *Table - queryfunc func(*node) ([]*node, error) - replyCh chan []*node + queryfunc queryFunc + replyCh chan []*enode.Node cancelCh <-chan struct{} asked, seen map[enode.ID]bool result nodesByDistance - replyBuffer []*node + replyBuffer []*enode.Node queries int } -type queryFunc func(*node) ([]*node, error) +type queryFunc func(*enode.Node) ([]*enode.Node, error) func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *lookup { it := &lookup{ @@ -47,7 +47,7 @@ func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *l asked: make(map[enode.ID]bool), seen: make(map[enode.ID]bool), result: nodesByDistance{target: target}, - replyCh: make(chan []*node, alpha), + replyCh: make(chan []*enode.Node, alpha), cancelCh: ctx.Done(), queries: -1, } @@ -61,7 +61,7 @@ func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *l func (it *lookup) run() []*enode.Node { for it.advance() { } - return unwrapNodes(it.result.entries) + return it.result.entries } // advance advances the lookup until any new nodes have been found. @@ -139,33 +139,14 @@ func (it *lookup) slowdown() { } } -func (it *lookup) query(n *node, reply chan<- []*node) { - fails := it.tab.db.FindFails(n.ID(), n.IP()) +func (it *lookup) query(n *enode.Node, reply chan<- []*enode.Node) { r, err := it.queryfunc(n) - if errors.Is(err, errClosed) { - // Avoid recording failures on shutdown. - reply <- nil - return - } else if len(r) == 0 { - fails++ - it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails) - // Remove the node from the local table if it fails to return anything useful too - // many times, but only if there are enough other nodes in the bucket. - dropped := false - if fails >= maxFindnodeFailures && it.tab.bucketLen(n.ID()) >= bucketSize/2 { - dropped = true - it.tab.delete(n) + if !errors.Is(err, errClosed) { // avoid recording failures on shutdown. 
+ success := len(r) > 0 + it.tab.trackRequest(n, success, r) + if err != nil { + it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "err", err) } - it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "failcount", fails, "dropped", dropped, "err", err) - } else if fails > 0 { - // Reset failure counter because it counts _consecutive_ failures. - it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0) - } - - // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll - // just remove those again during revalidation. - for _, n := range r { - it.tab.addSeenNode(n) } reply <- r } @@ -173,7 +154,7 @@ func (it *lookup) query(n *node, reply chan<- []*node) { // lookupIterator performs lookup operations and iterates over all seen nodes. // When a lookup finishes, a new one is created through nextLookup. type lookupIterator struct { - buffer []*node + buffer []*enode.Node nextLookup lookupFunc ctx context.Context cancel func() @@ -192,7 +173,7 @@ func (it *lookupIterator) Node() *enode.Node { if len(it.buffer) == 0 { return nil } - return unwrapNode(it.buffer[0]) + return it.buffer[0] } // Next moves to the next node. diff --git a/p2p/discover/metrics.go b/p2p/discover/metrics.go index 3cd0ab041403..8deafbbce47b 100644 --- a/p2p/discover/metrics.go +++ b/p2p/discover/metrics.go @@ -18,7 +18,7 @@ package discover import ( "fmt" - "net" + "net/netip" "github.com/ethereum/go-ethereum/metrics" ) @@ -58,16 +58,16 @@ func newMeteredConn(conn UDPConn) UDPConn { return &meteredUdpConn{UDPConn: conn} } -// ReadFromUDP delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way. -func (c *meteredUdpConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { - n, addr, err = c.UDPConn.ReadFromUDP(b) +// ReadFromUDPAddrPort delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way. 
+func (c *meteredUdpConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { + n, addr, err = c.UDPConn.ReadFromUDPAddrPort(b) ingressTrafficMeter.Mark(int64(n)) return n, addr, err } -// Write delegates a network write to the underlying connection, bumping the udp egress traffic meter along the way. -func (c *meteredUdpConn) WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) { - n, err = c.UDPConn.WriteToUDP(b, addr) +// WriteToUDP delegates a network write to the underlying connection, bumping the udp egress traffic meter along the way. +func (c *meteredUdpConn) WriteToUDP(b []byte, addr netip.AddrPort) (n int, err error) { + n, err = c.UDPConn.WriteToUDPAddrPort(b, addr) egressTrafficMeter.Mark(int64(n)) return n, err } diff --git a/p2p/discover/node.go b/p2p/discover/node.go index 9ffe101ccff8..042619221bde 100644 --- a/p2p/discover/node.go +++ b/p2p/discover/node.go @@ -21,7 +21,8 @@ import ( "crypto/elliptic" "errors" "math/big" - "net" + "slices" + "sort" "time" "github.com/ethereum/go-ethereum/common/math" @@ -29,12 +30,22 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" ) -// node represents a host on the network. -// The fields of Node may not be modified. -type node struct { - enode.Node - addedAt time.Time // time when the node was added to the table - livenessChecks uint // how often liveness was checked +type BucketNode struct { + Node *enode.Node `json:"node"` + AddedToTable time.Time `json:"addedToTable"` + AddedToBucket time.Time `json:"addedToBucket"` + Checks int `json:"checks"` + Live bool `json:"live"` +} + +// tableNode is an entry in Table. 
+type tableNode struct { + *enode.Node + revalList *revalidationList + addedToTable time.Time // first time node was added to bucket or replacement list + addedToBucket time.Time // time it was added in the actual bucket + livenessChecks uint // how often liveness was checked + isValidatedLive bool // true if existence of node is considered validated right now } type encPubkey [64]byte @@ -64,34 +75,59 @@ func (e encPubkey) id() enode.ID { return enode.ID(crypto.Keccak256Hash(e[:])) } -func wrapNode(n *enode.Node) *node { - return &node{Node: *n} -} - -func wrapNodes(ns []*enode.Node) []*node { - result := make([]*node, len(ns)) +func unwrapNodes(ns []*tableNode) []*enode.Node { + result := make([]*enode.Node, len(ns)) for i, n := range ns { - result[i] = wrapNode(n) + result[i] = n.Node } return result } -func unwrapNode(n *node) *enode.Node { - return &n.Node +func (n *tableNode) String() string { + return n.Node.String() } -func unwrapNodes(ns []*node) []*enode.Node { - result := make([]*enode.Node, len(ns)) - for i, n := range ns { - result[i] = unwrapNode(n) +// nodesByDistance is a list of nodes, ordered by distance to target. +type nodesByDistance struct { + entries []*enode.Node + target enode.ID +} + +// push adds the given node to the list, keeping the total size below maxElems. +func (h *nodesByDistance) push(n *enode.Node, maxElems int) { + ix := sort.Search(len(h.entries), func(i int) bool { + return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0 + }) + + end := len(h.entries) + if len(h.entries) < maxElems { + h.entries = append(h.entries, n) + } + if ix < end { + // Slide existing entries down to make room. + // This will overwrite the entry we just appended. 
+ copy(h.entries[ix+1:], h.entries[ix:]) + h.entries[ix] = n } - return result } -func (n *node) addr() *net.UDPAddr { - return &net.UDPAddr{IP: n.IP(), Port: n.UDP()} +type nodeType interface { + ID() enode.ID } -func (n *node) String() string { - return n.Node.String() +// containsID reports whether ns contains a node with the given ID. +func containsID[N nodeType](ns []N, id enode.ID) bool { + for _, n := range ns { + if n.ID() == id { + return true + } + } + return false +} + +// deleteNode removes a node from the list. +func deleteNode[N nodeType](list []N, id enode.ID) []N { + return slices.DeleteFunc(list, func(n N) bool { + return n.ID() == id + }) } diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 2b7a28708b8d..bd3c9b4143f0 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -24,16 +24,14 @@ package discover import ( "context" - crand "crypto/rand" - "encoding/binary" "fmt" - mrand "math/rand" "net" - "sort" + "slices" "sync" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" @@ -55,21 +53,21 @@ const ( bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 tableIPLimit, tableSubnet = 10, 24 - copyNodesInterval = 30 * time.Second - seedMinTableTime = 5 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour + seedMinTableTime = 5 * time.Minute + seedCount = 30 + seedMaxAge = 5 * 24 * time.Hour ) // Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps // itself up-to-date by verifying the liveness of neighbors and requesting their node // records when announcements of a new record version are received. 
type Table struct { - mutex sync.Mutex // protects buckets, bucket content, nursery, rand - buckets [nBuckets]*bucket // index of known nodes by distance - nursery []*node // bootstrap nodes - rand *mrand.Rand // source of randomness, periodically reseeded - ips netutil.DistinctNetSet + mutex sync.Mutex // protects buckets, bucket content, nursery, rand + buckets [nBuckets]*bucket // index of known nodes by distance + nursery []*enode.Node // bootstrap nodes + rand reseedingRandom // source of randomness, periodically reseeded + ips netutil.DistinctNetSet + revalidation tableRevalidation db *enode.DB // database of known nodes net transport @@ -77,13 +75,17 @@ type Table struct { log log.Logger // loop channels - refreshReq chan chan struct{} - initDone chan struct{} - closeReq chan struct{} - closed chan struct{} + refreshReq chan chan struct{} + revalResponseCh chan revalidationResponse + addNodeCh chan addNodeOp + addNodeHandled chan bool + trackRequestCh chan trackRequestOp + initDone chan struct{} + closeReq chan struct{} + closed chan struct{} - nodeAddedHook func(*bucket, *node) - nodeRemovedHook func(*bucket, *node) + nodeAddedHook func(*bucket, *tableNode) + nodeRemovedHook func(*bucket, *tableNode) } // transport is implemented by the UDP transports. @@ -98,28 +100,40 @@ type transport interface { // bucket contains nodes, ordered by their last activity. the entry // that was most recently active is the first element in entries. 
type bucket struct { - entries []*node // live entries, sorted by time of last contact - replacements []*node // recently seen nodes to be used if revalidation fails + entries []*tableNode // live entries, sorted by time of last contact + replacements []*tableNode // recently seen nodes to be used if revalidation fails ips netutil.DistinctNetSet index int } +type addNodeOp struct { + node *enode.Node + isInbound bool + forceSetLive bool // for tests +} + +type trackRequestOp struct { + node *enode.Node + foundNodes []*enode.Node + success bool +} + func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { cfg = cfg.withDefaults() tab := &Table{ - net: t, - db: db, - cfg: cfg, - log: cfg.Log, - refreshReq: make(chan chan struct{}), - initDone: make(chan struct{}), - closeReq: make(chan struct{}), - closed: make(chan struct{}), - rand: mrand.New(mrand.NewSource(0)), - ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, - } - if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil { - return nil, err + net: t, + db: db, + cfg: cfg, + log: cfg.Log, + refreshReq: make(chan chan struct{}), + revalResponseCh: make(chan revalidationResponse), + addNodeCh: make(chan addNodeOp), + addNodeHandled: make(chan bool), + trackRequestCh: make(chan trackRequestOp), + initDone: make(chan struct{}), + closeReq: make(chan struct{}), + closed: make(chan struct{}), + ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, } for i := range tab.buckets { tab.buckets[i] = &bucket{ @@ -127,41 +141,34 @@ func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}, } } - tab.seedRand() - tab.loadSeedNodes() - - return tab, nil -} + tab.rand.seed() + tab.revalidation.init(&cfg) -func newMeteredTable(t transport, db *enode.DB, cfg Config) (*Table, error) { - tab, err := newTable(t, db, cfg) - if err != nil { + // initial table content + if err := 
tab.setFallbackNodes(cfg.Bootnodes); err != nil { return nil, err } - if metrics.Enabled { - tab.nodeAddedHook = func(b *bucket, n *node) { - bucketsCounter[b.index].Inc(1) - } - tab.nodeRemovedHook = func(b *bucket, n *node) { - bucketsCounter[b.index].Dec(1) - } - } + tab.loadSeedNodes() + return tab, nil } // Nodes returns all nodes contained in the table. -func (tab *Table) Nodes() []*enode.Node { - if !tab.isInitDone() { - return nil - } - +func (tab *Table) Nodes() [][]BucketNode { tab.mutex.Lock() defer tab.mutex.Unlock() - var nodes []*enode.Node - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes = append(nodes, unwrapNode(n)) + nodes := make([][]BucketNode, len(tab.buckets)) + for i, b := range &tab.buckets { + nodes[i] = make([]BucketNode, len(b.entries)) + for j, n := range b.entries { + nodes[i][j] = BucketNode{ + Node: n.Node, + Checks: int(n.livenessChecks), + Live: n.isValidatedLive, + AddedToTable: n.addedToTable, + AddedToBucket: n.addedToBucket, + } } } return nodes @@ -171,15 +178,6 @@ func (tab *Table) self() *enode.Node { return tab.net.Self() } -func (tab *Table) seedRand() { - var b [8]byte - crand.Read(b[:]) - - tab.mutex.Lock() - tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:]))) - tab.mutex.Unlock() -} - // getNode returns the node with the given ID or nil if it isn't in the table. func (tab *Table) getNode(id enode.ID) *enode.Node { tab.mutex.Lock() @@ -188,7 +186,7 @@ func (tab *Table) getNode(id enode.ID) *enode.Node { b := tab.bucket(id) for _, e := range b.entries { if e.ID() == id { - return unwrapNode(e) + return e.Node } } return nil @@ -204,7 +202,7 @@ func (tab *Table) close() { // are used to connect to the network if the table is empty and there // are no known nodes in the database. 
func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { - nursery := make([]*node, 0, len(nodes)) + nursery := make([]*enode.Node, 0, len(nodes)) for _, n := range nodes { if err := n.ValidateComplete(); err != nil { return fmt.Errorf("bad bootstrap node %q: %v", n, err) @@ -213,7 +211,7 @@ func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IP()) continue } - nursery = append(nursery, wrapNode(n)) + nursery = append(nursery, n) } tab.nursery = nursery return nil @@ -239,52 +237,173 @@ func (tab *Table) refresh() <-chan struct{} { return done } -// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes. +// findnodeByID returns the n nodes in the table that are closest to the given id. +// This is used by the FINDNODE/v4 handler. +// +// The preferLive parameter says whether the caller wants liveness-checked results. If +// preferLive is true and the table contains any verified nodes, the result will not +// contain unverified nodes. However, if there are no verified nodes at all, the result +// will contain unverified nodes. +func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + // Scan all buckets. There might be a better way to do this, but there aren't that many + // buckets, so this solution should be fine. The worst-case complexity of this loop + // is O(tab.len() * nresults). + nodes := &nodesByDistance{target: target} + liveNodes := &nodesByDistance{target: target} + for _, b := range &tab.buckets { + for _, n := range b.entries { + nodes.push(n.Node, nresults) + if preferLive && n.isValidatedLive { + liveNodes.push(n.Node, nresults) + } + } + } + + if preferLive && len(liveNodes.entries) > 0 { + return liveNodes + } + return nodes +} + +// appendLiveNodes adds nodes at the given distance to the result slice. +// This is used by the FINDNODE/v5 handler. 
+func (tab *Table) appendLiveNodes(dist uint, result []*enode.Node) []*enode.Node { + if dist > 256 { + return result + } + if dist == 0 { + return append(result, tab.self()) + } + + tab.mutex.Lock() + for _, n := range tab.bucketAtDistance(int(dist)).entries { + if n.isValidatedLive { + result = append(result, n.Node) + } + } + tab.mutex.Unlock() + + // Shuffle result to avoid always returning same nodes in FINDNODE/v5. + tab.rand.Shuffle(len(result), func(i, j int) { + result[i], result[j] = result[j], result[i] + }) + return result +} + +// len returns the number of nodes in the table. +func (tab *Table) len() (n int) { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + for _, b := range &tab.buckets { + n += len(b.entries) + } + return n +} + +// addFoundNode adds a node which may not be live. If the bucket has space available, +// adding the node succeeds immediately. Otherwise, the node is added to the replacements +// list. +// +// The caller must not hold tab.mutex. +func (tab *Table) addFoundNode(n *enode.Node, forceSetLive bool) bool { + op := addNodeOp{node: n, isInbound: false, forceSetLive: forceSetLive} + select { + case tab.addNodeCh <- op: + return <-tab.addNodeHandled + case <-tab.closeReq: + return false + } +} + +// addInboundNode adds a node from an inbound contact. If the bucket has no space, the +// node is added to the replacements list. +// +// There is an additional safety measure: if the table is still initializing the node is +// not added. This prevents an attack where the table could be filled by just sending ping +// repeatedly. +// +// The caller must not hold tab.mutex. 
+func (tab *Table) addInboundNode(n *enode.Node) bool { + op := addNodeOp{node: n, isInbound: true} + select { + case tab.addNodeCh <- op: + return <-tab.addNodeHandled + case <-tab.closeReq: + return false + } +} + +func (tab *Table) trackRequest(n *enode.Node, success bool, foundNodes []*enode.Node) { + op := trackRequestOp{n, foundNodes, success} + select { + case tab.trackRequestCh <- op: + case <-tab.closeReq: + } +} + +// loop is the main loop of Table. func (tab *Table) loop() { var ( - revalidate = time.NewTimer(tab.nextRevalidateTime()) - refresh = time.NewTimer(tab.nextRefreshTime()) - copyNodes = time.NewTicker(copyNodesInterval) - refreshDone = make(chan struct{}) // where doRefresh reports completion - revalidateDone chan struct{} // where doRevalidate reports completion - waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs + refresh = time.NewTimer(tab.nextRefreshTime()) + refreshDone = make(chan struct{}) // where doRefresh reports completion + waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs + revalTimer = mclock.NewAlarm(tab.cfg.Clock) + reseedRandTimer = time.NewTicker(10 * time.Minute) ) defer refresh.Stop() - defer revalidate.Stop() - defer copyNodes.Stop() + defer revalTimer.Stop() + defer reseedRandTimer.Stop() // Start initial refresh. 
go tab.doRefresh(refreshDone) loop: for { + nextTime := tab.revalidation.run(tab, tab.cfg.Clock.Now()) + revalTimer.Schedule(nextTime) + select { + case <-reseedRandTimer.C: + tab.rand.seed() + + case <-revalTimer.C(): + + case r := <-tab.revalResponseCh: + tab.revalidation.handleResponse(tab, r) + + case op := <-tab.addNodeCh: + tab.mutex.Lock() + ok := tab.handleAddNode(op) + tab.mutex.Unlock() + tab.addNodeHandled <- ok + + case op := <-tab.trackRequestCh: + tab.handleTrackRequest(op) + case <-refresh.C: - tab.seedRand() if refreshDone == nil { refreshDone = make(chan struct{}) go tab.doRefresh(refreshDone) } + case req := <-tab.refreshReq: waiting = append(waiting, req) if refreshDone == nil { refreshDone = make(chan struct{}) go tab.doRefresh(refreshDone) } + case <-refreshDone: for _, ch := range waiting { close(ch) } waiting, refreshDone = nil, nil refresh.Reset(tab.nextRefreshTime()) - case <-revalidate.C: - revalidateDone = make(chan struct{}) - go tab.doRevalidate(revalidateDone) - case <-revalidateDone: - revalidate.Reset(tab.nextRevalidateTime()) - revalidateDone = nil - case <-copyNodes.C: - go tab.copyLiveNodes() + case <-tab.closeReq: break loop } @@ -296,9 +415,6 @@ loop: for _, ch := range waiting { close(ch) } - if revalidateDone != nil { - <-revalidateDone - } close(tab.closed) } @@ -327,177 +443,24 @@ func (tab *Table) doRefresh(done chan struct{}) { } func (tab *Table) loadSeedNodes() { - seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge)) + seeds := tab.db.QuerySeeds(seedCount, seedMaxAge) seeds = append(seeds, tab.nursery...) 
for i := range seeds { seed := seeds[i] if tab.log.Enabled(context.Background(), log.LevelTrace) { age := time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) - tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age) + addr, _ := seed.UDPEndpoint() + tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", addr, "age", age) } - tab.addSeenNode(seed) + tab.handleAddNode(addNodeOp{node: seed, isInbound: false}) } } -// doRevalidate checks that the last node in a random bucket is still live and replaces or -// deletes the node if it isn't. -func (tab *Table) doRevalidate(done chan<- struct{}) { - defer func() { done <- struct{}{} }() - - last, bi := tab.nodeToRevalidate() - if last == nil { - // No non-empty bucket found. - return - } - - // Ping the selected node and wait for a pong. - remoteSeq, err := tab.net.ping(unwrapNode(last)) - - // Also fetch record if the node replied and returned a higher sequence number. - if last.Seq() < remoteSeq { - n, err := tab.net.RequestENR(unwrapNode(last)) - if err != nil { - tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err) - } else { - last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks} - } - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.buckets[bi] - if err == nil { - // The node responded, move it to the front. - last.livenessChecks++ - tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks) - tab.bumpInBucket(b, last) - return - } - // No reply received, pick a replacement or delete the node if there aren't - // any replacements. 
- if r := tab.replace(b, last); r != nil { - tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP()) - } else { - tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks) - } -} - -// nodeToRevalidate returns the last node in a random, non-empty bucket. -func (tab *Table) nodeToRevalidate() (n *node, bi int) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - for _, bi = range tab.rand.Perm(len(tab.buckets)) { - b := tab.buckets[bi] - if len(b.entries) > 0 { - last := b.entries[len(b.entries)-1] - return last, bi - } - } - return nil, 0 -} - -func (tab *Table) nextRevalidateTime() time.Duration { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - return time.Duration(tab.rand.Int63n(int64(tab.cfg.PingInterval))) -} - func (tab *Table) nextRefreshTime() time.Duration { - tab.mutex.Lock() - defer tab.mutex.Unlock() - half := tab.cfg.RefreshInterval / 2 return half + time.Duration(tab.rand.Int63n(int64(half))) } -// copyLiveNodes adds nodes from the table to the database if they have been in the table -// longer than seedMinTableTime. -func (tab *Table) copyLiveNodes() { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - now := time.Now() - for _, b := range &tab.buckets { - for _, n := range b.entries { - if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime { - tab.db.UpdateNode(unwrapNode(n)) - } - } - } -} - -// findnodeByID returns the n nodes in the table that are closest to the given id. -// This is used by the FINDNODE/v4 handler. -// -// The preferLive parameter says whether the caller wants liveness-checked results. If -// preferLive is true and the table contains any verified nodes, the result will not -// contain unverified nodes. However, if there are no verified nodes at all, the result -// will contain unverified nodes. 
-func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - // Scan all buckets. There might be a better way to do this, but there aren't that many - // buckets, so this solution should be fine. The worst-case complexity of this loop - // is O(tab.len() * nresults). - nodes := &nodesByDistance{target: target} - liveNodes := &nodesByDistance{target: target} - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes.push(n, nresults) - if preferLive && n.livenessChecks > 0 { - liveNodes.push(n, nresults) - } - } - } - - if preferLive && len(liveNodes.entries) > 0 { - return liveNodes - } - return nodes -} - -// appendLiveNodes adds nodes at the given distance to the result slice. -func (tab *Table) appendLiveNodes(dist uint, result []*enode.Node) []*enode.Node { - if dist > 256 { - return result - } - if dist == 0 { - return append(result, tab.self()) - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - for _, n := range tab.bucketAtDistance(int(dist)).entries { - if n.livenessChecks >= 1 { - node := n.Node // avoid handing out pointer to struct field - result = append(result, &node) - } - } - return result -} - -// len returns the number of nodes in the table. -func (tab *Table) len() (n int) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - for _, b := range &tab.buckets { - n += len(b.entries) - } - return n -} - -// bucketLen returns the number of nodes in the bucket for the given ID. -func (tab *Table) bucketLen(id enode.ID) int { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - return len(tab.bucket(id).entries) -} - // bucket returns the bucket for the given node ID hash. func (tab *Table) bucket(id enode.ID) *bucket { d := enode.LogDist(tab.self().ID(), id) @@ -511,95 +474,6 @@ func (tab *Table) bucketAtDistance(d int) *bucket { return tab.buckets[d-bucketMinDistance-1] } -// addSeenNode adds a node which may or may not be live to the end of a bucket. 
If the -// bucket has space available, adding the node succeeds immediately. Otherwise, the node is -// added to the replacements list. -// -// The caller must not hold tab.mutex. -func (tab *Table) addSeenNode(n *node) { - if n.ID() == tab.self().ID() { - return - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.bucket(n.ID()) - if contains(b.entries, n.ID()) { - // Already in bucket, don't add. - return - } - if len(b.entries) >= bucketSize { - // Bucket full, maybe add as replacement. - tab.addReplacement(b, n) - return - } - if !tab.addIP(b, n.IP()) { - // Can't add: IP limit reached. - return - } - - // Add to end of bucket: - b.entries = append(b.entries, n) - b.replacements = deleteNode(b.replacements, n) - n.addedAt = time.Now() - - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(b, n) - } -} - -// addVerifiedNode adds a node whose existence has been verified recently to the front of a -// bucket. If the node is already in the bucket, it is moved to the front. If the bucket -// has no space, the node is added to the replacements list. -// -// There is an additional safety measure: if the table is still initializing the node -// is not added. This prevents an attack where the table could be filled by just sending -// ping repeatedly. -// -// The caller must not hold tab.mutex. -func (tab *Table) addVerifiedNode(n *node) { - if !tab.isInitDone() { - return - } - if n.ID() == tab.self().ID() { - return - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.bucket(n.ID()) - if tab.bumpInBucket(b, n) { - // Already in bucket, moved to front. - return - } - if len(b.entries) >= bucketSize { - // Bucket full, maybe add as replacement. - tab.addReplacement(b, n) - return - } - if !tab.addIP(b, n.IP()) { - // Can't add: IP limit reached. - return - } - - // Add to front of bucket. 
- b.entries, _ = pushNode(b.entries, n, bucketSize) - b.replacements = deleteNode(b.replacements, n) - n.addedAt = time.Now() - - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(b, n) - } -} - -// delete removes an entry from the node table. It is used to evacuate dead nodes. -func (tab *Table) delete(node *node) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - tab.deleteInBucket(tab.bucket(node.ID()), node) -} - func (tab *Table) addIP(b *bucket, ip net.IP) bool { if len(ip) == 0 { return false // Nodes without IP cannot be added. @@ -627,128 +501,194 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) { b.ips.Remove(ip) } -func (tab *Table) addReplacement(b *bucket, n *node) { - for _, e := range b.replacements { - if e.ID() == n.ID() { - return // already in list - } +// handleAddNode adds the node in the request to the table, if there is space. +// The caller must hold tab.mutex. +func (tab *Table) handleAddNode(req addNodeOp) bool { + if req.node.ID() == tab.self().ID() { + return false + } + // For nodes from inbound contact, there is an additional safety measure: if the table + // is still initializing the node is not added. + if req.isInbound && !tab.isInitDone() { + return false + } + + b := tab.bucket(req.node.ID()) + n, _ := tab.bumpInBucket(b, req.node, req.isInbound) + if n != nil { + // Already in bucket. + return false + } + if len(b.entries) >= bucketSize { + // Bucket full, maybe add as replacement. + tab.addReplacement(b, req.node) + return false + } + if !tab.addIP(b, req.node.IP()) { + // Can't add: IP limit reached. + return false + } + + // Add to bucket. + wn := &tableNode{Node: req.node} + if req.forceSetLive { + wn.livenessChecks = 1 + wn.isValidatedLive = true + } + b.entries = append(b.entries, wn) + b.replacements = deleteNode(b.replacements, wn.ID()) + tab.nodeAdded(b, wn) + return true +} + +// addReplacement adds n to the replacement cache of bucket b. 
+func (tab *Table) addReplacement(b *bucket, n *enode.Node) { + if containsID(b.replacements, n.ID()) { + // TODO: update ENR + return } if !tab.addIP(b, n.IP()) { return } - var removed *node - b.replacements, removed = pushNode(b.replacements, n, maxReplacements) + + wn := &tableNode{Node: n, addedToTable: time.Now()} + var removed *tableNode + b.replacements, removed = pushNode(b.replacements, wn, maxReplacements) if removed != nil { tab.removeIP(b, removed.IP()) } } -// replace removes n from the replacement list and replaces 'last' with it if it is the -// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced -// with someone else or became active. -func (tab *Table) replace(b *bucket, last *node) *node { - if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() { - // Entry has moved, don't replace it. - return nil +func (tab *Table) nodeAdded(b *bucket, n *tableNode) { + if n.addedToTable == (time.Time{}) { + n.addedToTable = time.Now() } - // Still the last entry. - if len(b.replacements) == 0 { - tab.deleteInBucket(b, last) - return nil + n.addedToBucket = time.Now() + tab.revalidation.nodeAdded(tab, n) + if tab.nodeAddedHook != nil { + tab.nodeAddedHook(b, n) } - r := b.replacements[tab.rand.Intn(len(b.replacements))] - b.replacements = deleteNode(b.replacements, r) - b.entries[len(b.entries)-1] = r - tab.removeIP(b, last.IP()) - return r -} - -// bumpInBucket moves the given node to the front of the bucket entry list -// if it is contained in that list. -func (tab *Table) bumpInBucket(b *bucket, n *node) bool { - for i := range b.entries { - if b.entries[i].ID() == n.ID() { - if !n.IP().Equal(b.entries[i].IP()) { - // Endpoint has changed, ensure that the new IP fits into table limits. - tab.removeIP(b, b.entries[i].IP()) - if !tab.addIP(b, n.IP()) { - // It doesn't, put the previous one back. - tab.addIP(b, b.entries[i].IP()) - return false - } - } - // Move it to the front. 
- copy(b.entries[1:], b.entries[:i]) - b.entries[0] = n - return true - } + if metrics.Enabled { + bucketsCounter[b.index].Inc(1) } - return false } -func (tab *Table) deleteInBucket(b *bucket, n *node) { - // Check if the node is actually in the bucket so the removed hook - // isn't called multiple times for the same node. - if !contains(b.entries, n.ID()) { - return - } - b.entries = deleteNode(b.entries, n) - tab.removeIP(b, n.IP()) +func (tab *Table) nodeRemoved(b *bucket, n *tableNode) { + tab.revalidation.nodeRemoved(n) if tab.nodeRemovedHook != nil { tab.nodeRemovedHook(b, n) } + if metrics.Enabled { + bucketsCounter[b.index].Dec(1) + } } -func contains(ns []*node, id enode.ID) bool { - for _, n := range ns { - if n.ID() == id { - return true - } +// deleteInBucket removes node n from the table. +// If there are replacement nodes in the bucket, the node is replaced. +func (tab *Table) deleteInBucket(b *bucket, id enode.ID) *tableNode { + index := slices.IndexFunc(b.entries, func(e *tableNode) bool { return e.ID() == id }) + if index == -1 { + // Entry has been removed already. + return nil } - return false -} -// pushNode adds n to the front of list, keeping at most max items. -func pushNode(list []*node, n *node, max int) ([]*node, *node) { - if len(list) < max { - list = append(list, nil) + // Remove the node. + n := b.entries[index] + b.entries = slices.Delete(b.entries, index, index+1) + tab.removeIP(b, n.IP()) + tab.nodeRemoved(b, n) + + // Add replacement. 
+ if len(b.replacements) == 0 { + tab.log.Debug("Removed dead node", "b", b.index, "id", n.ID(), "ip", n.IP()) + return nil } - removed := list[len(list)-1] - copy(list[1:], list) - list[0] = n - return list, removed + rindex := tab.rand.Intn(len(b.replacements)) + rep := b.replacements[rindex] + b.replacements = slices.Delete(b.replacements, rindex, rindex+1) + b.entries = append(b.entries, rep) + tab.nodeAdded(b, rep) + tab.log.Debug("Replaced dead node", "b", b.index, "id", n.ID(), "ip", n.IP(), "r", rep.ID(), "rip", rep.IP()) + return rep } -// deleteNode removes n from list. -func deleteNode(list []*node, n *node) []*node { - for i := range list { - if list[i].ID() == n.ID() { - return append(list[:i], list[i+1:]...) +// bumpInBucket updates a node record if it exists in the bucket. +// The second return value reports whether the node's endpoint (IP/port) was updated. +func (tab *Table) bumpInBucket(b *bucket, newRecord *enode.Node, isInbound bool) (n *tableNode, endpointChanged bool) { + i := slices.IndexFunc(b.entries, func(elem *tableNode) bool { + return elem.ID() == newRecord.ID() + }) + if i == -1 { + return nil, false // not in bucket + } + n = b.entries[i] + + // For inbound updates (from the node itself) we accept any change, even if it sets + // back the sequence number. For found nodes (!isInbound), seq has to advance. Note + // this check also ensures found discv4 nodes (which always have seq=0) can't be + // updated. + if newRecord.Seq() <= n.Seq() && !isInbound { + return n, false + } + + // Check endpoint update against IP limits. + ipchanged := newRecord.IPAddr() != n.IPAddr() + portchanged := newRecord.UDP() != n.UDP() + if ipchanged { + tab.removeIP(b, n.IP()) + if !tab.addIP(b, newRecord.IP()) { + // It doesn't fit with the limit, put the previous record back. + tab.addIP(b, n.IP()) + return n, false } } - return list -} -// nodesByDistance is a list of nodes, ordered by distance to target. 
-type nodesByDistance struct { - entries []*node - target enode.ID + // Apply update. + n.Node = newRecord + if ipchanged || portchanged { + // Ensure node is revalidated quickly for endpoint changes. + tab.revalidation.nodeEndpointChanged(tab, n) + return n, true + } + return n, false } -// push adds the given node to the list, keeping the total size below maxElems. -func (h *nodesByDistance) push(n *node, maxElems int) { - ix := sort.Search(len(h.entries), func(i int) bool { - return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0 - }) +func (tab *Table) handleTrackRequest(op trackRequestOp) { + var fails int + if op.success { + // Reset failure counter because it counts _consecutive_ failures. + tab.db.UpdateFindFails(op.node.ID(), op.node.IP(), 0) + } else { + fails = tab.db.FindFails(op.node.ID(), op.node.IP()) + fails++ + tab.db.UpdateFindFails(op.node.ID(), op.node.IP(), fails) + } - end := len(h.entries) - if len(h.entries) < maxElems { - h.entries = append(h.entries, n) + tab.mutex.Lock() + defer tab.mutex.Unlock() + + b := tab.bucket(op.node.ID()) + // Remove the node from the local table if it fails to return anything useful too + // many times, but only if there are enough other nodes in the bucket. This latter + // condition specifically exists to make bootstrapping in smaller test networks more + // reliable. + if fails >= maxFindnodeFailures && len(b.entries) >= bucketSize/4 { + tab.deleteInBucket(b, op.node.ID()) } - if ix < end { - // Slide existing entries down to make room. - // This will overwrite the entry we just appended. - copy(h.entries[ix+1:], h.entries[ix:]) - h.entries[ix] = n + + // Add found nodes. + for _, n := range op.foundNodes { + tab.handleAddNode(addNodeOp{n, false, false}) } } + +// pushNode adds n to the front of list, keeping at most max items. 
+func pushNode(list []*tableNode, n *tableNode, max int) ([]*tableNode, *tableNode) { + if len(list) < max { + list = append(list, nil) + } + removed := list[len(list)-1] + copy(list[1:], list) + list[0] = n + return list, removed +} diff --git a/p2p/discover/table_reval.go b/p2p/discover/table_reval.go new file mode 100644 index 000000000000..f2ea8b34fa3e --- /dev/null +++ b/p2p/discover/table_reval.go @@ -0,0 +1,244 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package discover + +import ( + "fmt" + "math" + "slices" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +const never = mclock.AbsTime(math.MaxInt64) + +const slowRevalidationFactor = 3 + +// tableRevalidation implements the node revalidation process. +// It tracks all nodes contained in Table, and schedules sending PING to them. 
+type tableRevalidation struct { + fast revalidationList + slow revalidationList + activeReq map[enode.ID]struct{} +} + +type revalidationResponse struct { + n *tableNode + newRecord *enode.Node + didRespond bool +} + +func (tr *tableRevalidation) init(cfg *Config) { + tr.activeReq = make(map[enode.ID]struct{}) + tr.fast.nextTime = never + tr.fast.interval = cfg.PingInterval + tr.fast.name = "fast" + tr.slow.nextTime = never + tr.slow.interval = cfg.PingInterval * slowRevalidationFactor + tr.slow.name = "slow" +} + +// nodeAdded is called when the table receives a new node. +func (tr *tableRevalidation) nodeAdded(tab *Table, n *tableNode) { + tr.fast.push(n, tab.cfg.Clock.Now(), &tab.rand) +} + +// nodeRemoved is called when a node was removed from the table. +func (tr *tableRevalidation) nodeRemoved(n *tableNode) { + if n.revalList == nil { + panic(fmt.Errorf("removed node %v has nil revalList", n.ID())) + } + n.revalList.remove(n) +} + +// nodeEndpointChanged is called when a change in IP or port is detected. +func (tr *tableRevalidation) nodeEndpointChanged(tab *Table, n *tableNode) { + n.isValidatedLive = false + tr.moveToList(&tr.fast, n, tab.cfg.Clock.Now(), &tab.rand) +} + +// run performs node revalidation. +// It returns the next time it should be invoked, which is used in the Table main loop +// to schedule a timer. However, run can be called at any time. +func (tr *tableRevalidation) run(tab *Table, now mclock.AbsTime) (nextTime mclock.AbsTime) { + if n := tr.fast.get(now, &tab.rand, tr.activeReq); n != nil { + tr.startRequest(tab, n) + tr.fast.schedule(now, &tab.rand) + } + if n := tr.slow.get(now, &tab.rand, tr.activeReq); n != nil { + tr.startRequest(tab, n) + tr.slow.schedule(now, &tab.rand) + } + + return min(tr.fast.nextTime, tr.slow.nextTime) +} + +// startRequest spawns a revalidation request for node n. 
+func (tr *tableRevalidation) startRequest(tab *Table, n *tableNode) { + if _, ok := tr.activeReq[n.ID()]; ok { + panic(fmt.Errorf("duplicate startRequest (node %v)", n.ID())) + } + tr.activeReq[n.ID()] = struct{}{} + resp := revalidationResponse{n: n} + + // Fetch the node while holding lock. + tab.mutex.Lock() + node := n.Node + tab.mutex.Unlock() + + go tab.doRevalidate(resp, node) +} + +func (tab *Table) doRevalidate(resp revalidationResponse, node *enode.Node) { + // Ping the selected node and wait for a pong response. + remoteSeq, err := tab.net.ping(node) + resp.didRespond = err == nil + + // Also fetch record if the node replied and returned a higher sequence number. + if remoteSeq > node.Seq() { + newrec, err := tab.net.RequestENR(node) + if err != nil { + tab.log.Debug("ENR request failed", "id", node.ID(), "err", err) + } else { + resp.newRecord = newrec + } + } + + select { + case tab.revalResponseCh <- resp: + case <-tab.closed: + } +} + +// handleResponse processes the result of a revalidation request. +func (tr *tableRevalidation) handleResponse(tab *Table, resp revalidationResponse) { + var ( + now = tab.cfg.Clock.Now() + n = resp.n + b = tab.bucket(n.ID()) + ) + delete(tr.activeReq, n.ID()) + + // If the node was removed from the table while getting checked, we need to stop + // processing here to avoid re-adding it. + if n.revalList == nil { + return + } + + // Store potential seeds in database. + // This is done via defer to avoid holding Table lock while writing to DB. + defer func() { + if n.isValidatedLive && n.livenessChecks > 5 { + tab.db.UpdateNode(resp.n.Node) + } + }() + + // Remaining logic needs access to Table internals. 
+	tab.mutex.Lock()
+	defer tab.mutex.Unlock()
+
+	if !resp.didRespond {
+		n.livenessChecks /= 3
+		if n.livenessChecks <= 0 {
+			tab.deleteInBucket(b, n.ID())
+		} else {
+			tab.log.Debug("Node revalidation failed", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name)
+			tr.moveToList(&tr.fast, n, now, &tab.rand)
+		}
+		return
+	}
+
+	// The node responded.
+	n.livenessChecks++
+	n.isValidatedLive = true
+	tab.log.Debug("Node revalidated", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name)
+	var endpointChanged bool
+	if resp.newRecord != nil {
+		_, endpointChanged = tab.bumpInBucket(b, resp.newRecord, false)
+	}
+
+	// Node moves to slow list if it passed and hasn't changed.
+	if !endpointChanged {
+		tr.moveToList(&tr.slow, n, now, &tab.rand)
+	}
+}
+
+// moveToList ensures n is in the 'dest' list.
+func (tr *tableRevalidation) moveToList(dest *revalidationList, n *tableNode, now mclock.AbsTime, rand randomSource) {
+	if n.revalList == dest {
+		return
+	}
+	if n.revalList != nil {
+		n.revalList.remove(n)
+	}
+	dest.push(n, now, rand)
+}
+
+// revalidationList holds a list of nodes and the next revalidation time.
+type revalidationList struct {
+	nodes    []*tableNode
+	nextTime mclock.AbsTime
+	interval time.Duration
+	name     string
+}
+
+// get returns a random node from the queue. Nodes in the 'exclude' map are not returned.
+func (list *revalidationList) get(now mclock.AbsTime, rand randomSource, exclude map[enode.ID]struct{}) *tableNode { + if now < list.nextTime || len(list.nodes) == 0 { + return nil + } + for i := 0; i < len(list.nodes)*3; i++ { + n := list.nodes[rand.Intn(len(list.nodes))] + _, excluded := exclude[n.ID()] + if !excluded { + return n + } + } + return nil +} + +func (list *revalidationList) schedule(now mclock.AbsTime, rand randomSource) { + list.nextTime = now.Add(time.Duration(rand.Int63n(int64(list.interval)))) +} + +func (list *revalidationList) push(n *tableNode, now mclock.AbsTime, rand randomSource) { + list.nodes = append(list.nodes, n) + if list.nextTime == never { + list.schedule(now, rand) + } + n.revalList = list +} + +func (list *revalidationList) remove(n *tableNode) { + i := slices.Index(list.nodes, n) + if i == -1 { + panic(fmt.Errorf("node %v not found in list", n.ID())) + } + list.nodes = slices.Delete(list.nodes, i, i+1) + if len(list.nodes) == 0 { + list.nextTime = never + } + n.revalList = nil +} + +func (list *revalidationList) contains(id enode.ID) bool { + return slices.ContainsFunc(list.nodes, func(n *tableNode) bool { + return n.ID() == id + }) +} diff --git a/p2p/discover/table_reval_test.go b/p2p/discover/table_reval_test.go new file mode 100644 index 000000000000..360544393439 --- /dev/null +++ b/p2p/discover/table_reval_test.go @@ -0,0 +1,119 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package discover
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+)
+
+// This test checks that revalidation can handle a node disappearing while
+// a request is active.
+func TestRevalidation_nodeRemoved(t *testing.T) {
+	var (
+		clock     mclock.Simulated
+		transport = newPingRecorder()
+		tab, db   = newInactiveTestTable(transport, Config{Clock: &clock})
+		tr        = &tab.revalidation
+	)
+	defer db.Close()
+
+	// Add a node to the table.
+	node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1})
+	tab.handleAddNode(addNodeOp{node: node})
+
+	// Start a revalidation request. Schedule once to get the next start time,
+	// then advance the clock to that point and schedule again to start.
+	next := tr.run(tab, clock.Now())
+	clock.Run(time.Duration(next + 1))
+	tr.run(tab, clock.Now())
+	if len(tr.activeReq) != 1 {
+		t.Fatal("revalidation request did not start:", tr.activeReq)
+	}
+
+	// Delete the node.
+	tab.deleteInBucket(tab.bucket(node.ID()), node.ID())
+
+	// Now finish the revalidation request.
+	var resp revalidationResponse
+	select {
+	case resp = <-tab.revalResponseCh:
+	case <-time.After(1 * time.Second):
+		t.Fatal("timed out waiting for revalidation")
+	}
+	tr.handleResponse(tab, resp)
+
+	// Ensure the node was not re-added to the table.
+	if tab.getNode(node.ID()) != nil {
+		t.Fatal("node was re-added to Table")
+	}
+	if tr.fast.contains(node.ID()) || tr.slow.contains(node.ID()) {
+		t.Fatal("removed node contained in revalidation list")
+	}
+}
+
+// This test checks that nodes with an updated endpoint remain in the fast revalidation list.
+func TestRevalidation_endpointUpdate(t *testing.T) { + var ( + clock mclock.Simulated + transport = newPingRecorder() + tab, db = newInactiveTestTable(transport, Config{Clock: &clock}) + tr = &tab.revalidation + ) + defer db.Close() + + // Add node to table. + node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1}) + tab.handleAddNode(addNodeOp{node: node}) + + // Update the record in transport, including endpoint update. + record := node.Record() + record.Set(enr.IP{100, 100, 100, 100}) + record.Set(enr.UDP(9999)) + nodev2 := enode.SignNull(record, node.ID()) + transport.updateRecord(nodev2) + + // Start a revalidation request. Schedule once to get the next start time, + // then advance the clock to that point and schedule again to start. + next := tr.run(tab, clock.Now()) + clock.Run(time.Duration(next + 1)) + tr.run(tab, clock.Now()) + if len(tr.activeReq) != 1 { + t.Fatal("revalidation request did not start:", tr.activeReq) + } + + // Now finish the revalidation request. 
+ var resp revalidationResponse + select { + case resp = <-tab.revalResponseCh: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting for revalidation") + } + tr.handleResponse(tab, resp) + + if tr.fast.nodes[0].ID() != node.ID() { + t.Fatal("node not contained in fast revalidation list") + } + if tr.fast.nodes[0].isValidatedLive { + t.Fatal("node is marked live after endpoint change") + } +} diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 3ba342225133..30e7d56f4afd 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -20,14 +20,17 @@ import ( "crypto/ecdsa" "fmt" "math/rand" - "net" "reflect" + "slices" "testing" "testing/quick" "time" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/testlog" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/netutil" @@ -49,106 +52,109 @@ func TestTable_pingReplace(t *testing.T) { } func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) { + simclock := new(mclock.Simulated) transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{ + Clock: simclock, + Log: testlog.Logger(t, log.LevelTrace), + }) defer db.Close() defer tab.close() <-tab.initDone // Fill up the sender's bucket. 
- pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8") - pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99)) - last := fillBucket(tab, pingSender) + replacementNodeKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8") + replacementNode := enode.NewV4(&replacementNodeKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99) + last := fillBucket(tab, replacementNode.ID()) + tab.mutex.Lock() + nodeEvents := newNodeEventRecorder(128) + tab.nodeAddedHook = nodeEvents.nodeAdded + tab.nodeRemovedHook = nodeEvents.nodeRemoved + tab.mutex.Unlock() - // Add the sender as if it just pinged us. Revalidate should replace the last node in - // its bucket if it is unresponsive. Revalidate again to ensure that + // The revalidation process should replace + // this node in the bucket if it is unresponsive. transport.dead[last.ID()] = !lastInBucketIsResponding - transport.dead[pingSender.ID()] = !newNodeIsResponding - tab.addSeenNode(pingSender) - tab.doRevalidate(make(chan struct{}, 1)) - tab.doRevalidate(make(chan struct{}, 1)) - - if !transport.pinged[last.ID()] { - // Oldest node in bucket is pinged to see whether it is still alive. - t.Error("table did not ping last node in bucket") + transport.dead[replacementNode.ID()] = !newNodeIsResponding + + // Add replacement node to table. + tab.addFoundNode(replacementNode, false) + + t.Log("last:", last.ID()) + t.Log("replacement:", replacementNode.ID()) + + // Wait until the last node was pinged. + waitForRevalidationPing(t, transport, tab, last.ID()) + + if !lastInBucketIsResponding { + if !nodeEvents.waitNodeAbsent(last.ID(), 2*time.Second) { + t.Error("last node was not removed") + } + if !nodeEvents.waitNodePresent(replacementNode.ID(), 2*time.Second) { + t.Error("replacement node was not added") + } + + // If a replacement is expected, we also need to wait until the replacement node + // was pinged and added/removed. 
+ waitForRevalidationPing(t, transport, tab, replacementNode.ID()) + if !newNodeIsResponding { + if !nodeEvents.waitNodeAbsent(replacementNode.ID(), 2*time.Second) { + t.Error("replacement node was not removed") + } + } } + // Check bucket content. tab.mutex.Lock() defer tab.mutex.Unlock() wantSize := bucketSize if !lastInBucketIsResponding && !newNodeIsResponding { wantSize-- } - if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize { - t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize) + bucket := tab.bucket(replacementNode.ID()) + if l := len(bucket.entries); l != wantSize { + t.Errorf("wrong bucket size after revalidation: got %d, want %d", l, wantSize) } - if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding { - t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding) + if ok := containsID(bucket.entries, last.ID()); ok != lastInBucketIsResponding { + t.Errorf("revalidated node found: %t, want: %t", ok, lastInBucketIsResponding) } wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding - if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry { - t.Errorf("new entry found: %t, want: %t", found, wantNewEntry) + if ok := containsID(bucket.entries, replacementNode.ID()); ok != wantNewEntry { + t.Errorf("replacement node found: %t, want: %t", ok, wantNewEntry) } } -func TestBucket_bumpNoDuplicates(t *testing.T) { - t.Parallel() - cfg := &quick.Config{ - MaxCount: 1000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - Values: func(args []reflect.Value, rand *rand.Rand) { - // generate a random list of nodes. this will be the content of the bucket. - n := rand.Intn(bucketSize-1) + 1 - nodes := make([]*node, n) - for i := range nodes { - nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200)) - } - args[0] = reflect.ValueOf(nodes) - // generate random bump positions. 
- bumps := make([]int, rand.Intn(100)) - for i := range bumps { - bumps[i] = rand.Intn(len(nodes)) - } - args[1] = reflect.ValueOf(bumps) - }, - } - - prop := func(nodes []*node, bumps []int) (ok bool) { - tab, db := newTestTable(newPingRecorder()) - defer db.Close() - defer tab.close() +// waitForRevalidationPing waits until a PING message is sent to a node with the given id. +func waitForRevalidationPing(t *testing.T, transport *pingRecorder, tab *Table, id enode.ID) *enode.Node { + t.Helper() - b := &bucket{entries: make([]*node, len(nodes))} - copy(b.entries, nodes) - for i, pos := range bumps { - tab.bumpInBucket(b, b.entries[pos]) - if hasDuplicates(b.entries) { - t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps)) - for _, n := range b.entries { - t.Logf(" %p", n) - } - return false - } + simclock := tab.cfg.Clock.(*mclock.Simulated) + maxAttempts := tab.len() * 8 + for i := 0; i < maxAttempts; i++ { + simclock.Run(tab.cfg.PingInterval * slowRevalidationFactor) + p := transport.waitPing(2 * time.Second) + if p == nil { + t.Fatal("Table did not send revalidation ping") + } + if id == (enode.ID{}) || p.ID() == id { + return p } - checkIPLimitInvariant(t, tab) - return true - } - if err := quick.Check(prop, cfg); err != nil { - t.Error(err) } + t.Fatalf("Table did not ping node %v (%d attempts)", id, maxAttempts) + return nil } // This checks that the table-wide IP limit is applied correctly. func TestTable_IPLimit(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() for i := 0; i < tableIPLimit+1; i++ { n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)}) - tab.addSeenNode(n) + tab.addFoundNode(n, false) } if tab.len() > tableIPLimit { t.Errorf("too many nodes in table") @@ -159,14 +165,14 @@ func TestTable_IPLimit(t *testing.T) { // This checks that the per-bucket IP limit is applied correctly. 
func TestTable_BucketIPLimit(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() d := 3 for i := 0; i < bucketIPLimit+1; i++ { n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)}) - tab.addSeenNode(n) + tab.addFoundNode(n, false) } if tab.len() > bucketIPLimit { t.Errorf("too many nodes in table") @@ -196,7 +202,7 @@ func TestTable_findnodeByID(t *testing.T) { test := func(test *closeTest) bool { // for any node table, Target and N transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() fillTable(tab, test.All, true) @@ -227,7 +233,7 @@ func TestTable_findnodeByID(t *testing.T) { // check that the result nodes have minimum distance to target. for _, b := range tab.buckets { for _, n := range b.entries { - if contains(result, n.ID()) { + if containsID(result, n.ID()) { continue // don't run the check below for nodes in result } farthestResult := result[len(result)-1].ID() @@ -250,7 +256,7 @@ func TestTable_findnodeByID(t *testing.T) { type closeTest struct { Self enode.ID Target enode.ID - All []*node + All []*enode.Node N int } @@ -263,15 +269,14 @@ func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value { for _, id := range gen([]enode.ID{}, rand).([]enode.ID) { r := new(enr.Record) r.Set(enr.IP(genIP(rand))) - n := wrapNode(enode.SignNull(r, id)) - n.livenessChecks = 1 + n := enode.SignNull(r, id) t.All = append(t.All, n) } return reflect.ValueOf(t) } -func TestTable_addVerifiedNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder()) +func TestTable_addInboundNode(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) <-tab.initDone defer db.Close() defer tab.close() @@ -279,31 +284,29 @@ func TestTable_addVerifiedNode(t *testing.T) { // Insert two nodes. 
n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addSeenNode(n1) - tab.addSeenNode(n2) + tab.addFoundNode(n1, false) + tab.addFoundNode(n2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2}) - // Verify bucket content: - bcontent := []*node{n1, n2} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries) - } - - // Add a changed version of n2. + // Add a changed version of n2. The bucket should be updated. newrec := n2.Record() newrec.Set(enr.IP{99, 99, 99, 99}) - newn2 := wrapNode(enode.SignNull(newrec, n2.ID())) - tab.addVerifiedNode(newn2) - - // Check that bucket is updated correctly. - newBcontent := []*node{newn2, n1} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) { - t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries) - } - checkIPLimitInvariant(t, tab) + n2v2 := enode.SignNull(newrec, n2.ID()) + tab.addInboundNode(n2v2) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) + + // Try updating n2 without sequence number change. The update is accepted + // because it's inbound. + newrec = n2.Record() + newrec.Set(enr.IP{100, 100, 100, 100}) + newrec.SetSeq(n2.Seq()) + n2v3 := enode.SignNull(newrec, n2.ID()) + tab.addInboundNode(n2v3) + checkBucketContent(t, tab, []*enode.Node{n1, n2v3}) } -func TestTable_addSeenNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder()) +func TestTable_addFoundNode(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) <-tab.initDone defer db.Close() defer tab.close() @@ -311,25 +314,86 @@ func TestTable_addSeenNode(t *testing.T) { // Insert two nodes. 
n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addSeenNode(n1) - tab.addSeenNode(n2) - - // Verify bucket content: - bcontent := []*node{n1, n2} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries) - } + tab.addFoundNode(n1, false) + tab.addFoundNode(n2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2}) - // Add a changed version of n2. + // Add a changed version of n2. The bucket should be updated. newrec := n2.Record() newrec.Set(enr.IP{99, 99, 99, 99}) - newn2 := wrapNode(enode.SignNull(newrec, n2.ID())) - tab.addSeenNode(newn2) + n2v2 := enode.SignNull(newrec, n2.ID()) + tab.addFoundNode(n2v2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) + + // Try updating n2 without a sequence number change. + // The update should not be accepted. + newrec = n2.Record() + newrec.Set(enr.IP{100, 100, 100, 100}) + newrec.SetSeq(n2.Seq()) + n2v3 := enode.SignNull(newrec, n2.ID()) + tab.addFoundNode(n2v3, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) +} + +// This test checks that discv4 nodes can update their own endpoint via PING. +func TestTable_addInboundNodeUpdateV4Accept(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) + <-tab.initDone + defer db.Close() + defer tab.close() + + // Add a v4 node. + key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") + n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) + tab.addInboundNode(n1) + checkBucketContent(t, tab, []*enode.Node{n1}) + + // Add an updated version with changed IP. + // The update will be accepted because it is inbound. 
+ n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) + tab.addInboundNode(n1v2) + checkBucketContent(t, tab, []*enode.Node{n1v2}) +} + +// This test checks that discv4 node entries will NOT be updated when a +// changed record is found. +func TestTable_addFoundNodeV4UpdateReject(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) + <-tab.initDone + defer db.Close() + defer tab.close() - // Check that bucket content is unchanged. - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries) + // Add a v4 node. + key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") + n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) + tab.addFoundNode(n1, false) + checkBucketContent(t, tab, []*enode.Node{n1}) + + // Add an updated version with changed IP. + // The update won't be accepted because it isn't inbound. + n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) + tab.addFoundNode(n1v2, false) + checkBucketContent(t, tab, []*enode.Node{n1}) +} + +func checkBucketContent(t *testing.T, tab *Table, nodes []*enode.Node) { + t.Helper() + + b := tab.bucket(nodes[0].ID()) + if reflect.DeepEqual(unwrapNodes(b.entries), nodes) { + return } + t.Log("wrong bucket content. have nodes:") + for _, n := range b.entries { + t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IP()) + } + t.Log("want nodes:") + for _, n := range nodes { + t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IP()) + } + t.FailNow() + + // Also check IP limits. checkIPLimitInvariant(t, tab) } @@ -337,7 +401,10 @@ func TestTable_addSeenNode(t *testing.T) { // announces a new sequence number, the new record should be pulled. 
func TestTable_revalidateSyncRecord(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{ + Clock: new(mclock.Simulated), + Log: testlog.Logger(t, log.LevelTrace), + }) <-tab.initDone defer db.Close() defer tab.close() @@ -346,15 +413,19 @@ func TestTable_revalidateSyncRecord(t *testing.T) { var r enr.Record r.Set(enr.IP(net.IP{127, 0, 0, 1})) id := enode.ID{1} - n1 := wrapNode(enode.SignNull(&r, id)) - tab.addSeenNode(n1) + n1 := enode.SignNull(&r, id) + tab.addFoundNode(n1, false) // Update the node record. r.Set(enr.WithEntry("foo", "bar")) n2 := enode.SignNull(&r, id) transport.updateRecord(n2) - tab.doRevalidate(make(chan struct{}, 1)) + // Wait for revalidation. We wait for the node to be revalidated two times + // in order to synchronize with the update in the table. + waitForRevalidationPing(t, transport, tab, n2.ID()) + waitForRevalidationPing(t, transport, tab, n2.ID()) + intable := tab.getNode(id) if !reflect.DeepEqual(intable, n2) { t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq()) @@ -366,7 +437,7 @@ func TestNodesPush(t *testing.T) { n1 := nodeAtDistance(target, 255, intIP(1)) n2 := nodeAtDistance(target, 254, intIP(2)) n3 := nodeAtDistance(target, 253, intIP(3)) - perm := [][]*node{ + perm := [][]*enode.Node{ {n3, n2, n1}, {n3, n1, n2}, {n2, n3, n1}, @@ -381,7 +452,7 @@ func TestNodesPush(t *testing.T) { for _, n := range nodes { list.push(n, 3) } - if !slicesEqual(list.entries, perm[0], nodeIDEqual) { + if !slices.EqualFunc(list.entries, perm[0], nodeIDEqual) { t.Fatal("not equal") } } @@ -392,28 +463,16 @@ func TestNodesPush(t *testing.T) { for _, n := range nodes { list.push(n, 2) } - if !slicesEqual(list.entries, perm[0][:2], nodeIDEqual) { + if !slices.EqualFunc(list.entries, perm[0][:2], nodeIDEqual) { t.Fatal("not equal") } } } -func nodeIDEqual(n1, n2 *node) bool { +func nodeIDEqual[N nodeType](n1, n2 N) bool { return n1.ID() == 
n2.ID() } -func slicesEqual[T any](s1, s2 []T, check func(e1, e2 T) bool) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if !check(s1[i], s2[i]) { - return false - } - } - return true -} - // gen wraps quick.Value so it's easier to use. // it generates a random value of the given value's type. func gen(typ interface{}, rand *rand.Rand) interface{} { diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go index f5d4d39bdbb7..997ac3779994 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -26,6 +26,8 @@ import ( "net" "slices" "sync" + "sync/atomic" + "time" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/enode" @@ -40,27 +42,32 @@ func init() { nullNode = enode.SignNull(&r, enode.ID{}) } -func newTestTable(t transport) (*Table, *enode.DB) { - cfg := Config{} +func newTestTable(t transport, cfg Config) (*Table, *enode.DB) { + tab, db := newInactiveTestTable(t, cfg) + go tab.loop() + return tab, db +} + +// newInactiveTestTable creates a Table without running the main loop. +func newInactiveTestTable(t transport, cfg Config) (*Table, *enode.DB) { db, _ := enode.OpenDB("") tab, _ := newTable(t, db, cfg) - go tab.loop() return tab, db } // nodeAtDistance creates a node for which enode.LogDist(base, n.id) == ld. -func nodeAtDistance(base enode.ID, ld int, ip net.IP) *node { +func nodeAtDistance(base enode.ID, ld int, ip net.IP) *enode.Node { var r enr.Record r.Set(enr.IP(ip)) r.Set(enr.UDP(30303)) - return wrapNode(enode.SignNull(&r, idAtDistance(base, ld))) + return enode.SignNull(&r, idAtDistance(base, ld)) } // nodesAtDistance creates n nodes for which enode.LogDist(base, node.ID()) == ld. 
func nodesAtDistance(base enode.ID, ld int, n int) []*enode.Node { results := make([]*enode.Node, n) for i := range results { - results[i] = unwrapNode(nodeAtDistance(base, ld, intIP(i))) + results[i] = nodeAtDistance(base, ld, intIP(i)) } return results } @@ -98,31 +105,33 @@ func intIP(i int) net.IP { } // fillBucket inserts nodes into the given bucket until it is full. -func fillBucket(tab *Table, n *node) (last *node) { - ld := enode.LogDist(tab.self().ID(), n.ID()) - b := tab.bucket(n.ID()) +func fillBucket(tab *Table, id enode.ID) (last *tableNode) { + ld := enode.LogDist(tab.self().ID(), id) + b := tab.bucket(id) for len(b.entries) < bucketSize { - b.entries = append(b.entries, nodeAtDistance(tab.self().ID(), ld, intIP(ld))) + node := nodeAtDistance(tab.self().ID(), ld, intIP(ld)) + if !tab.addFoundNode(node, false) { + panic("node not added") + } } return b.entries[bucketSize-1] } // fillTable adds nodes the table to the end of their corresponding bucket // if the bucket is not full. The caller must not hold tab.mutex. -func fillTable(tab *Table, nodes []*node, setLive bool) { +func fillTable(tab *Table, nodes []*enode.Node, setLive bool) { for _, n := range nodes { - if setLive { - n.livenessChecks = 1 - } - tab.addSeenNode(n) + tab.addFoundNode(n, setLive) } } type pingRecorder struct { - mu sync.Mutex - dead, pinged map[enode.ID]bool - records map[enode.ID]*enode.Node - n *enode.Node + mu sync.Mutex + cond *sync.Cond + dead map[enode.ID]bool + records map[enode.ID]*enode.Node + pinged []*enode.Node + n *enode.Node } func newPingRecorder() *pingRecorder { @@ -130,12 +139,13 @@ func newPingRecorder() *pingRecorder { r.Set(enr.IP{0, 0, 0, 0}) n := enode.SignNull(&r, enode.ID{}) - return &pingRecorder{ + t := &pingRecorder{ dead: make(map[enode.ID]bool), - pinged: make(map[enode.ID]bool), records: make(map[enode.ID]*enode.Node), n: n, } + t.cond = sync.NewCond(&t.mu) + return t } // updateRecord updates a node record. 
Future calls to ping and @@ -151,12 +161,40 @@ func (t *pingRecorder) Self() *enode.Node { return nullNode } func (t *pingRecorder) lookupSelf() []*enode.Node { return nil } func (t *pingRecorder) lookupRandom() []*enode.Node { return nil } +func (t *pingRecorder) waitPing(timeout time.Duration) *enode.Node { + t.mu.Lock() + defer t.mu.Unlock() + + // Wake up the loop on timeout. + var timedout atomic.Bool + timer := time.AfterFunc(timeout, func() { + timedout.Store(true) + t.cond.Broadcast() + }) + defer timer.Stop() + + // Wait for a ping. + for { + if timedout.Load() { + return nil + } + if len(t.pinged) > 0 { + n := t.pinged[0] + t.pinged = append(t.pinged[:0], t.pinged[1:]...) + return n + } + t.cond.Wait() + } +} + // ping simulates a ping request. func (t *pingRecorder) ping(n *enode.Node) (seq uint64, err error) { t.mu.Lock() defer t.mu.Unlock() - t.pinged[n.ID()] = true + t.pinged = append(t.pinged, n) + t.cond.Broadcast() + if t.dead[n.ID()] { return 0, errTimeout } @@ -177,7 +215,7 @@ func (t *pingRecorder) RequestENR(n *enode.Node) (*enode.Node, error) { return t.records[n.ID()], nil } -func hasDuplicates(slice []*node) bool { +func hasDuplicates(slice []*enode.Node) bool { seen := make(map[enode.ID]bool, len(slice)) for i, e := range slice { if e == nil { @@ -219,14 +257,14 @@ func nodeEqual(n1 *enode.Node, n2 *enode.Node) bool { return n1.ID() == n2.ID() && n1.IP().Equal(n2.IP()) } -func sortByID(nodes []*enode.Node) { - slices.SortFunc(nodes, func(a, b *enode.Node) int { +func sortByID[N nodeType](nodes []N) { + slices.SortFunc(nodes, func(a, b N) int { return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) }) } -func sortedByDistanceTo(distbase enode.ID, slice []*node) bool { - return slices.IsSortedFunc(slice, func(a, b *node) int { +func sortedByDistanceTo(distbase enode.ID, slice []*enode.Node) bool { + return slices.IsSortedFunc(slice, func(a, b *enode.Node) int { return enode.DistCmp(distbase, a.ID(), b.ID()) }) } @@ -256,3 +294,57 @@ func 
hexEncPubkey(h string) (ret encPubkey) { copy(ret[:], b) return ret } + +type nodeEventRecorder struct { + evc chan recordedNodeEvent +} + +type recordedNodeEvent struct { + node *tableNode + added bool +} + +func newNodeEventRecorder(buffer int) *nodeEventRecorder { + return &nodeEventRecorder{ + evc: make(chan recordedNodeEvent, buffer), + } +} + +func (set *nodeEventRecorder) nodeAdded(b *bucket, n *tableNode) { + select { + case set.evc <- recordedNodeEvent{n, true}: + default: + panic("no space in event buffer") + } +} + +func (set *nodeEventRecorder) nodeRemoved(b *bucket, n *tableNode) { + select { + case set.evc <- recordedNodeEvent{n, false}: + default: + panic("no space in event buffer") + } +} + +func (set *nodeEventRecorder) waitNodePresent(id enode.ID, timeout time.Duration) bool { + return set.waitNodeEvent(id, timeout, true) +} + +func (set *nodeEventRecorder) waitNodeAbsent(id enode.ID, timeout time.Duration) bool { + return set.waitNodeEvent(id, timeout, false) +} + +func (set *nodeEventRecorder) waitNodeEvent(id enode.ID, timeout time.Duration, added bool) bool { + timer := time.NewTimer(timeout) + defer timer.Stop() + for { + select { + case ev := <-set.evc: + if ev.node.ID() == id && ev.added == added { + return true + } + case <-timer.C: + return false + } + } +} diff --git a/p2p/discover/v4_lookup_test.go b/p2p/discover/v4_lookup_test.go index 5682f262be76..bc9475a8b369 100644 --- a/p2p/discover/v4_lookup_test.go +++ b/p2p/discover/v4_lookup_test.go @@ -19,7 +19,7 @@ package discover import ( "crypto/ecdsa" "fmt" - "net" + "net/netip" "slices" "testing" @@ -40,7 +40,7 @@ func TestUDPv4_Lookup(t *testing.T) { } // Seed table with initial node. - fillTable(test.table, []*node{wrapNode(lookupTestnet.node(256, 0))}, true) + fillTable(test.table, []*enode.Node{lookupTestnet.node(256, 0)}, true) // Start the lookup. 
resultC := make(chan []*enode.Node, 1) @@ -70,9 +70,9 @@ func TestUDPv4_LookupIterator(t *testing.T) { defer test.close() // Seed table with initial nodes. - bootnodes := make([]*node, len(lookupTestnet.dists[256])) + bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) for i := range lookupTestnet.dists[256] { - bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + bootnodes[i] = lookupTestnet.node(256, i) } fillTable(test.table, bootnodes, true) go serveTestnet(test, lookupTestnet) @@ -105,9 +105,9 @@ func TestUDPv4_LookupIteratorClose(t *testing.T) { defer test.close() // Seed table with initial nodes. - bootnodes := make([]*node, len(lookupTestnet.dists[256])) + bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) for i := range lookupTestnet.dists[256] { - bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + bootnodes[i] = lookupTestnet.node(256, i) } fillTable(test.table, bootnodes, true) go serveTestnet(test, lookupTestnet) @@ -136,7 +136,7 @@ func TestUDPv4_LookupIteratorClose(t *testing.T) { func serveTestnet(test *udpTest, testnet *preminedTestnet) { for done := false; !done; { - done = test.waitPacketOut(func(p v4wire.Packet, to *net.UDPAddr, hash []byte) { + done = test.waitPacketOut(func(p v4wire.Packet, to netip.AddrPort, hash []byte) { n, key := testnet.nodeByAddr(to) switch p.(type) { case *v4wire.Ping: @@ -158,10 +158,10 @@ func checkLookupResults(t *testing.T, tn *preminedTestnet, results []*enode.Node for _, e := range results { t.Logf(" ld=%d, %x", enode.LogDist(tn.target.id(), e.ID()), e.ID().Bytes()) } - if hasDuplicates(wrapNodes(results)) { + if hasDuplicates(results) { t.Errorf("result set contains duplicate entries") } - if !sortedByDistanceTo(tn.target.id(), wrapNodes(results)) { + if !sortedByDistanceTo(tn.target.id(), results) { t.Errorf("result set not sorted by distance to target") } wantNodes := tn.closest(len(results)) @@ -264,9 +264,10 @@ func (tn *preminedTestnet) node(dist, index int) *enode.Node { return n 
} -func (tn *preminedTestnet) nodeByAddr(addr *net.UDPAddr) (*enode.Node, *ecdsa.PrivateKey) { - dist := int(addr.IP[1])<<8 + int(addr.IP[2]) - index := int(addr.IP[3]) +func (tn *preminedTestnet) nodeByAddr(addr netip.AddrPort) (*enode.Node, *ecdsa.PrivateKey) { + ip := addr.Addr().As4() + dist := int(ip[1])<<8 + int(ip[2]) + index := int(ip[3]) key := tn.dists[dist][index] return tn.node(dist, index), key } @@ -274,7 +275,7 @@ func (tn *preminedTestnet) nodeByAddr(addr *net.UDPAddr) (*enode.Node, *ecdsa.Pr func (tn *preminedTestnet) nodesAtDistance(dist int) []v4wire.Node { result := make([]v4wire.Node, len(tn.dists[dist])) for i := range result { - result[i] = nodeToRPC(wrapNode(tn.node(dist, i))) + result[i] = nodeToRPC(tn.node(dist, i)) } return result } diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 7a0a0f1c7779..3880ca34a708 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -26,6 +26,7 @@ import ( "fmt" "io" "net" + "net/netip" "sync" "time" @@ -45,6 +46,7 @@ var ( errClockWarp = errors.New("reply deadline too far in the future") errClosed = errors.New("socket closed") errLowPort = errors.New("low port") + errNoUDPEndpoint = errors.New("node has no UDP endpoint") ) const ( @@ -93,7 +95,7 @@ type UDPv4 struct { type replyMatcher struct { // these fields must match in the reply. from enode.ID - ip net.IP + ip netip.Addr ptype byte // time when the request must complete @@ -119,7 +121,7 @@ type replyMatchFunc func(v4wire.Packet) (matched bool, requestDone bool) // reply is a reply packet from a certain node. type reply struct { from enode.ID - ip net.IP + ip netip.Addr data v4wire.Packet // loop indicates whether there was // a matching request by sending on this channel. 
@@ -142,7 +144,7 @@ func ListenV4(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { log: cfg.Log, } - tab, err := newMeteredTable(t, ln.Database(), cfg) + tab, err := newTable(t, ln.Database(), cfg) if err != nil { return nil, err } @@ -201,9 +203,12 @@ func (t *UDPv4) Resolve(n *enode.Node) *enode.Node { } func (t *UDPv4) ourEndpoint() v4wire.Endpoint { - n := t.Self() - a := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} - return v4wire.NewEndpoint(a, uint16(n.TCP())) + node := t.Self() + addr, ok := node.UDPEndpoint() + if !ok { + return v4wire.Endpoint{} + } + return v4wire.NewEndpoint(addr, uint16(node.TCP())) } // Ping sends a ping message to the given node. @@ -214,7 +219,11 @@ func (t *UDPv4) Ping(n *enode.Node) error { // ping sends a ping message to the given node and waits for a reply. func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { - rm := t.sendPing(n.ID(), &net.UDPAddr{IP: n.IP(), Port: n.UDP()}, nil) + addr, ok := n.UDPEndpoint() + if !ok { + return 0, errNoUDPEndpoint + } + rm := t.sendPing(n.ID(), addr, nil) if err = <-rm.errc; err == nil { seq = rm.reply.(*v4wire.Pong).ENRSeq } @@ -223,7 +232,7 @@ func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { // sendPing sends a ping message to the given node and invokes the callback // when the reply arrives. -func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *replyMatcher { +func (t *UDPv4) sendPing(toid enode.ID, toaddr netip.AddrPort, callback func()) *replyMatcher { req := t.makePing(toaddr) packet, hash, err := v4wire.Encode(t.priv, req) if err != nil { @@ -233,7 +242,7 @@ func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *r } // Add a matcher for the reply to the pending reply queue. Pongs are matched if they // reference the ping we're about to send. 
- rm := t.pending(toid, toaddr.IP, v4wire.PongPacket, func(p v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(toid, toaddr.Addr(), v4wire.PongPacket, func(p v4wire.Packet) (matched bool, requestDone bool) { matched = bytes.Equal(p.(*v4wire.Pong).ReplyTok, hash) if matched && callback != nil { callback() @@ -241,12 +250,13 @@ func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *r return matched, matched }) // Send the packet. - t.localNode.UDPContact(toaddr) + toUDPAddr := &net.UDPAddr{IP: toaddr.Addr().AsSlice()} + t.localNode.UDPContact(toUDPAddr) t.write(toaddr, toid, req.Name(), packet) return rm } -func (t *UDPv4) makePing(toaddr *net.UDPAddr) *v4wire.Ping { +func (t *UDPv4) makePing(toaddr netip.AddrPort) *v4wire.Ping { return &v4wire.Ping{ Version: 4, From: t.ourEndpoint(), @@ -290,35 +300,39 @@ func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup { func (t *UDPv4) newLookup(ctx context.Context, targetKey encPubkey) *lookup { target := enode.ID(crypto.Keccak256Hash(targetKey[:])) ekey := v4wire.Pubkey(targetKey) - it := newLookup(ctx, t.tab, target, func(n *node) ([]*node, error) { - return t.findnode(n.ID(), n.addr(), ekey) + it := newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { + addr, ok := n.UDPEndpoint() + if !ok { + return nil, errNoUDPEndpoint + } + return t.findnode(n.ID(), addr, ekey) }) return it } // findnode sends a findnode request to the given node and waits until // the node has sent up to k neighbors. -func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubkey) ([]*node, error) { - t.ensureBond(toid, toaddr) +func (t *UDPv4) findnode(toid enode.ID, toAddrPort netip.AddrPort, target v4wire.Pubkey) ([]*enode.Node, error) { + t.ensureBond(toid, toAddrPort) // Add a matcher for 'neighbours' replies to the pending reply queue. The matcher is // active until enough nodes have been received. 
- nodes := make([]*node, 0, bucketSize) + nodes := make([]*enode.Node, 0, bucketSize) nreceived := 0 - rm := t.pending(toid, toaddr.IP, v4wire.NeighborsPacket, func(r v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(toid, toAddrPort.Addr(), v4wire.NeighborsPacket, func(r v4wire.Packet) (matched bool, requestDone bool) { reply := r.(*v4wire.Neighbors) for _, rn := range reply.Nodes { nreceived++ - n, err := t.nodeFromRPC(toaddr, rn) + n, err := t.nodeFromRPC(toAddrPort, rn) if err != nil { - t.log.Trace("Invalid neighbor node received", "ip", rn.IP, "addr", toaddr, "err", err) + t.log.Trace("Invalid neighbor node received", "ip", rn.IP, "addr", toAddrPort, "err", err) continue } nodes = append(nodes, n) } return true, nreceived >= bucketSize }) - t.send(toaddr, toid, &v4wire.Findnode{ + t.send(toAddrPort, toid, &v4wire.Findnode{ Target: target, Expiration: uint64(time.Now().Add(expiration).Unix()), }) @@ -336,7 +350,7 @@ func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubke // RequestENR sends ENRRequest to the given node and waits for a response. func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { - addr := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} + addr, _ := n.UDPEndpoint() t.ensureBond(n.ID(), addr) req := &v4wire.ENRRequest{ @@ -349,7 +363,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { // Add a matcher for the reply to the pending reply queue. Responses are matched if // they reference the request we're about to send. 
- rm := t.pending(n.ID(), addr.IP, v4wire.ENRResponsePacket, func(r v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(n.ID(), addr.Addr(), v4wire.ENRResponsePacket, func(r v4wire.Packet) (matched bool, requestDone bool) { matched = bytes.Equal(r.(*v4wire.ENRResponse).ReplyTok, hash) return matched, matched }) @@ -369,15 +383,19 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { if respN.Seq() < n.Seq() { return n, nil // response record is older } - if err := netutil.CheckRelayIP(addr.IP, respN.IP()); err != nil { + if err := netutil.CheckRelayIP(addr.Addr().AsSlice(), respN.IP()); err != nil { return nil, fmt.Errorf("invalid IP in response record: %v", err) } return respN, nil } +func (t *UDPv4) TableBuckets() [][]BucketNode { + return t.tab.Nodes() +} + // pending adds a reply matcher to the pending reply queue. // see the documentation of type replyMatcher for a detailed explanation. -func (t *UDPv4) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchFunc) *replyMatcher { +func (t *UDPv4) pending(id enode.ID, ip netip.Addr, ptype byte, callback replyMatchFunc) *replyMatcher { ch := make(chan error, 1) p := &replyMatcher{from: id, ip: ip, ptype: ptype, callback: callback, errc: ch} select { @@ -391,7 +409,7 @@ func (t *UDPv4) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchF // handleReply dispatches a reply packet, invoking reply matchers. It returns // whether any matcher considered the packet acceptable. -func (t *UDPv4) handleReply(from enode.ID, fromIP net.IP, req v4wire.Packet) bool { +func (t *UDPv4) handleReply(from enode.ID, fromIP netip.Addr, req v4wire.Packet) bool { matched := make(chan bool, 1) select { case t.gotreply <- reply{from, fromIP, req, matched}: @@ -457,7 +475,7 @@ func (t *UDPv4) loop() { var matched bool // whether any replyMatcher considered the reply acceptable. 
for el := plist.Front(); el != nil; el = el.Next() { p := el.Value.(*replyMatcher) - if p.from == r.from && p.ptype == r.data.Kind() && p.ip.Equal(r.ip) { + if p.from == r.from && p.ptype == r.data.Kind() && p.ip == r.ip { ok, requestDone := p.callback(r.data) matched = matched || ok p.reply = r.data @@ -496,7 +514,7 @@ func (t *UDPv4) loop() { } } -func (t *UDPv4) send(toaddr *net.UDPAddr, toid enode.ID, req v4wire.Packet) ([]byte, error) { +func (t *UDPv4) send(toaddr netip.AddrPort, toid enode.ID, req v4wire.Packet) ([]byte, error) { packet, hash, err := v4wire.Encode(t.priv, req) if err != nil { return hash, err @@ -504,8 +522,8 @@ func (t *UDPv4) send(toaddr *net.UDPAddr, toid enode.ID, req v4wire.Packet) ([]b return hash, t.write(toaddr, toid, req.Name(), packet) } -func (t *UDPv4) write(toaddr *net.UDPAddr, toid enode.ID, what string, packet []byte) error { - _, err := t.conn.WriteToUDP(packet, toaddr) +func (t *UDPv4) write(toaddr netip.AddrPort, toid enode.ID, what string, packet []byte) error { + _, err := t.conn.WriteToUDPAddrPort(packet, toaddr) t.log.Trace(">> "+what, "id", toid, "addr", toaddr, "err", err) return err } @@ -519,7 +537,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { buf := make([]byte, maxPacketSize) for { - nbytes, from, err := t.conn.ReadFromUDP(buf) + nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) if netutil.IsTemporaryError(err) { // Ignore temporary read errors. 
t.log.Debug("Temporary UDP read error", "err", err) @@ -540,7 +558,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { } } -func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { +func (t *UDPv4) handlePacket(from netip.AddrPort, buf []byte) error { rawpacket, fromKey, hash, err := v4wire.Decode(buf) if err != nil { t.log.Debug("Bad discv4 packet", "addr", from, "err", err) @@ -559,15 +577,16 @@ func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { } // checkBond checks if the given node has a recent enough endpoint proof. -func (t *UDPv4) checkBond(id enode.ID, ip net.IP) bool { - return time.Since(t.db.LastPongReceived(id, ip)) < bondExpiration +func (t *UDPv4) checkBond(id enode.ID, ip netip.AddrPort) bool { + return time.Since(t.db.LastPongReceived(id, ip.Addr().AsSlice())) < bondExpiration } // ensureBond solicits a ping from a node if we haven't seen a ping from it for a while. // This ensures there is a valid endpoint proof on the remote end. -func (t *UDPv4) ensureBond(toid enode.ID, toaddr *net.UDPAddr) { - tooOld := time.Since(t.db.LastPingReceived(toid, toaddr.IP)) > bondExpiration - if tooOld || t.db.FindFails(toid, toaddr.IP) > maxFindnodeFailures { +func (t *UDPv4) ensureBond(toid enode.ID, toaddr netip.AddrPort) { + ip := toaddr.Addr().AsSlice() + tooOld := time.Since(t.db.LastPingReceived(toid, ip)) > bondExpiration + if tooOld || t.db.FindFails(toid, ip) > maxFindnodeFailures { rm := t.sendPing(toid, toaddr, nil) <-rm.errc // Wait for them to ping back and process our pong. 
@@ -575,11 +594,11 @@ func (t *UDPv4) ensureBond(toid enode.ID, toaddr *net.UDPAddr) { } } -func (t *UDPv4) nodeFromRPC(sender *net.UDPAddr, rn v4wire.Node) (*node, error) { +func (t *UDPv4) nodeFromRPC(sender netip.AddrPort, rn v4wire.Node) (*enode.Node, error) { if rn.UDP <= 1024 { return nil, errLowPort } - if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil { + if err := netutil.CheckRelayIP(sender.Addr().AsSlice(), rn.IP); err != nil { return nil, err } if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) { @@ -589,12 +608,12 @@ func (t *UDPv4) nodeFromRPC(sender *net.UDPAddr, rn v4wire.Node) (*node, error) if err != nil { return nil, err } - n := wrapNode(enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP))) + n := enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)) err = n.ValidateComplete() return n, err } -func nodeToRPC(n *node) v4wire.Node { +func nodeToRPC(n *enode.Node) v4wire.Node { var key ecdsa.PublicKey var ekey v4wire.Pubkey if err := n.Load((*enode.Secp256k1)(&key)); err == nil { @@ -633,14 +652,14 @@ type packetHandlerV4 struct { senderKey *ecdsa.PublicKey // used for ping // preverify checks whether the packet is valid and should be handled at all. - preverify func(p *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error + preverify func(p *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error // handle handles the packet. 
- handle func(req *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) + handle func(req *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) } // PING/v4 -func (t *UDPv4) verifyPing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyPing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Ping) if v4wire.Expired(req.Expiration) { @@ -654,7 +673,7 @@ func (t *UDPv4) verifyPing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.I return nil } -func (t *UDPv4) handlePing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handlePing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { req := h.Packet.(*v4wire.Ping) // Reply. @@ -666,45 +685,51 @@ func (t *UDPv4) handlePing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.I }) // Ping back if our last pong on file is too far in the past. - n := wrapNode(enode.NewV4(h.senderKey, from.IP, int(req.From.TCP), from.Port)) - if time.Since(t.db.LastPongReceived(n.ID(), from.IP)) > bondExpiration { + fromIP := from.Addr().AsSlice() + n := enode.NewV4(h.senderKey, fromIP, int(req.From.TCP), int(from.Port())) + if time.Since(t.db.LastPongReceived(n.ID(), fromIP)) > bondExpiration { t.sendPing(fromID, from, func() { - t.tab.addVerifiedNode(n) + t.tab.addInboundNode(n) }) } else { - t.tab.addVerifiedNode(n) + t.tab.addInboundNode(n) } // Update node database and endpoint predictor. 
- t.db.UpdateLastPingReceived(n.ID(), from.IP, time.Now()) - t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) + t.db.UpdateLastPingReceived(n.ID(), fromIP, time.Now()) + fromUDPAddr := &net.UDPAddr{IP: fromIP, Port: int(from.Port())} + toUDPAddr := &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)} + t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr) } // PONG/v4 -func (t *UDPv4) verifyPong(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyPong(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Pong) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.handleReply(fromID, from.IP, req) { + if !t.handleReply(fromID, from.Addr(), req) { return errUnsolicitedReply } - t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) - t.db.UpdateLastPongReceived(fromID, from.IP, time.Now()) + fromIP := from.Addr().AsSlice() + fromUDPAddr := &net.UDPAddr{IP: fromIP, Port: int(from.Port())} + toUDPAddr := &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)} + t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr) + t.db.UpdateLastPongReceived(fromID, fromIP, time.Now()) return nil } // FINDNODE/v4 -func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Findnode) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.checkBond(fromID, from.IP) { + if !t.checkBond(fromID, from) { // No endpoint proof pong exists, we don't process the packet. This prevents an // attack vector where the discovery protocol could be used to amplify traffic in a // DDOS attack. 
A malicious actor would send a findnode request with the IP address @@ -716,7 +741,7 @@ func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno return nil } -func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handleFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { req := h.Packet.(*v4wire.Findnode) // Determine closest nodes. @@ -728,7 +753,8 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno p := v4wire.Neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())} var sent bool for _, n := range closest { - if netutil.CheckRelayIP(from.IP, n.IP()) == nil { + fromIP := from.Addr().AsSlice() + if netutil.CheckRelayIP(fromIP, n.IP()) == nil { p.Nodes = append(p.Nodes, nodeToRPC(n)) } if len(p.Nodes) == v4wire.MaxNeighbors { @@ -744,13 +770,13 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno // NEIGHBORS/v4 -func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Neighbors) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.handleReply(fromID, from.IP, h.Packet) { + if !t.handleReply(fromID, from.Addr(), h.Packet) { return errUnsolicitedReply } return nil @@ -758,19 +784,19 @@ func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from *net.UDPAddr, fromID en // ENRREQUEST/v4 -func (t *UDPv4) verifyENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.ENRRequest) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.checkBond(fromID, from.IP) { + if 
!t.checkBond(fromID, from) { return errUnknownNode } return nil } -func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { t.send(from, fromID, &v4wire.ENRResponse{ ReplyTok: mac, Record: *t.localNode.Node().Record(), @@ -779,8 +805,8 @@ func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID e // ENRRESPONSE/v4 -func (t *UDPv4) verifyENRResponse(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { - if !t.handleReply(fromID, from.IP, h.Packet) { +func (t *UDPv4) verifyENRResponse(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { + if !t.handleReply(fromID, from.Addr(), h.Packet) { return errUnsolicitedReply } return nil diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 9b80214f7552..28a6fb867596 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -26,6 +26,7 @@ import ( "io" "math/rand" "net" + "net/netip" "reflect" "sync" "testing" @@ -55,7 +56,7 @@ type udpTest struct { udp *UDPv4 sent [][]byte localkey, remotekey *ecdsa.PrivateKey - remoteaddr *net.UDPAddr + remoteaddr netip.AddrPort } func newUDPTest(t *testing.T) *udpTest { @@ -64,7 +65,7 @@ func newUDPTest(t *testing.T) *udpTest { pipe: newpipe(), localkey: newkey(), remotekey: newkey(), - remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303}, + remoteaddr: netip.MustParseAddrPort("10.0.1.99:30303"), } test.db, _ = enode.OpenDB("") @@ -92,7 +93,7 @@ func (test *udpTest) packetIn(wantError error, data v4wire.Packet) { } // handles a packet as if it had been sent to the transport by the key/endpoint. 
-func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr *net.UDPAddr, data v4wire.Packet) { +func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr netip.AddrPort, data v4wire.Packet) { test.t.Helper() enc, _, err := v4wire.Encode(key, data) @@ -106,7 +107,7 @@ func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr * } // waits for a packet to be sent by the transport. -// validate should have type func(X, *net.UDPAddr, []byte), where X is a packet type. +// validate should have type func(X, netip.AddrPort, []byte), where X is a packet type. func (test *udpTest) waitPacketOut(validate interface{}) (closed bool) { test.t.Helper() @@ -128,7 +129,7 @@ func (test *udpTest) waitPacketOut(validate interface{}) (closed bool) { test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) return false } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(&dgram.to), reflect.ValueOf(hash)}) + fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(hash)}) return false } @@ -236,7 +237,7 @@ func TestUDPv4_findnodeTimeout(t *testing.T) { test := newUDPTest(t) defer test.close() - toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222} + toaddr := netip.AddrPortFrom(netip.MustParseAddr("1.2.3.4"), 2222) toid := enode.ID{1, 2, 3, 4} target := v4wire.Pubkey{4, 5, 6, 7} result, err := test.udp.findnode(toid, toaddr, target) @@ -261,26 +262,25 @@ func TestUDPv4_findnode(t *testing.T) { for i := 0; i < numCandidates; i++ { key := newkey() ip := net.IP{10, 13, 0, byte(i)} - n := wrapNode(enode.NewV4(&key.PublicKey, ip, 0, 2000)) + n := enode.NewV4(&key.PublicKey, ip, 0, 2000) // Ensure half of table content isn't verified live yet. 
if i > numCandidates/2 { - n.livenessChecks = 1 live[n.ID()] = true } + test.table.addFoundNode(n, live[n.ID()]) nodes.push(n, numCandidates) } - fillTable(test.table, nodes.entries, false) // ensure there's a bond with the test node, // findnode won't be accepted otherwise. remoteID := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID() - test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.IP, time.Now()) + test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.Addr().AsSlice(), time.Now()) // check that closest neighbors are returned. expected := test.table.findnodeByID(testTarget.ID(), bucketSize, true) test.packetIn(nil, &v4wire.Findnode{Target: testTarget, Expiration: futureExp}) - waitNeighbors := func(want []*node) { - test.waitPacketOut(func(p *v4wire.Neighbors, to *net.UDPAddr, hash []byte) { + waitNeighbors := func(want []*enode.Node) { + test.waitPacketOut(func(p *v4wire.Neighbors, to netip.AddrPort, hash []byte) { if len(p.Nodes) != len(want) { t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), len(want)) return @@ -309,10 +309,10 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) { defer test.close() rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey) - test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.IP, time.Now()) + test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.Addr().AsSlice(), time.Now()) // queue a pending findnode request - resultc, errc := make(chan []*node, 1), make(chan error, 1) + resultc, errc := make(chan []*enode.Node, 1), make(chan error, 1) go func() { rid := encodePubkey(&test.remotekey.PublicKey).id() ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget) @@ -325,18 +325,18 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) { // wait for the findnode to be sent. 
// after it is sent, the transport is waiting for a reply - test.waitPacketOut(func(p *v4wire.Findnode, to *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Findnode, to netip.AddrPort, hash []byte) { if p.Target != testTarget { t.Errorf("wrong target: got %v, want %v", p.Target, testTarget) } }) // send the reply as two packets. - list := []*node{ - wrapNode(enode.MustParse("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304")), - wrapNode(enode.MustParse("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303")), - wrapNode(enode.MustParse("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17")), - wrapNode(enode.MustParse("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303")), + list := []*enode.Node{ + enode.MustParse("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"), + enode.MustParse("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"), + enode.MustParse("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"), + enode.MustParse("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"), } rpclist := make([]v4wire.Node, len(list)) for i := range list { @@ -368,8 +368,8 @@ func TestUDPv4_pingMatch(t 
*testing.T) { crand.Read(randToken) test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, *net.UDPAddr, []byte) {}) - test.waitPacketOut(func(*v4wire.Ping, *net.UDPAddr, []byte) {}) + test.waitPacketOut(func(*v4wire.Pong, netip.AddrPort, []byte) {}) + test.waitPacketOut(func(*v4wire.Ping, netip.AddrPort, []byte) {}) test.packetIn(errUnsolicitedReply, &v4wire.Pong{ReplyTok: randToken, To: testLocalAnnounced, Expiration: futureExp}) } @@ -379,10 +379,10 @@ func TestUDPv4_pingMatchIP(t *testing.T) { defer test.close() test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, *net.UDPAddr, []byte) {}) + test.waitPacketOut(func(*v4wire.Pong, netip.AddrPort, []byte) {}) - test.waitPacketOut(func(p *v4wire.Ping, to *net.UDPAddr, hash []byte) { - wrongAddr := &net.UDPAddr{IP: net.IP{33, 44, 1, 2}, Port: 30000} + test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { + wrongAddr := netip.MustParseAddrPort("33.44.1.2:30000") test.packetInFrom(errUnsolicitedReply, test.remotekey, wrongAddr, &v4wire.Pong{ ReplyTok: hash, To: testLocalAnnounced, @@ -393,41 +393,36 @@ func TestUDPv4_pingMatchIP(t *testing.T) { func TestUDPv4_successfulPing(t *testing.T) { test := newUDPTest(t) - added := make(chan *node, 1) - test.table.nodeAddedHook = func(b *bucket, n *node) { added <- n } + added := make(chan *tableNode, 1) + test.table.nodeAddedHook = func(b *bucket, n *tableNode) { added <- n } defer test.close() // The remote side sends a ping packet to initiate the exchange. go test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) // The ping is replied to. 
- test.waitPacketOut(func(p *v4wire.Pong, to *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Pong, to netip.AddrPort, hash []byte) { pinghash := test.sent[0][:32] if !bytes.Equal(p.ReplyTok, pinghash) { t.Errorf("got pong.ReplyTok %x, want %x", p.ReplyTok, pinghash) } - wantTo := v4wire.Endpoint{ - // The mirrored UDP address is the UDP packet sender - IP: test.remoteaddr.IP, UDP: uint16(test.remoteaddr.Port), - // The mirrored TCP port is the one from the ping packet - TCP: testRemote.TCP, - } + // The mirrored UDP address is the UDP packet sender. + // The mirrored TCP port is the one from the ping packet. + wantTo := v4wire.NewEndpoint(test.remoteaddr, testRemote.TCP) if !reflect.DeepEqual(p.To, wantTo) { t.Errorf("got pong.To %v, want %v", p.To, wantTo) } }) // Remote is unknown, the table pings back. - test.waitPacketOut(func(p *v4wire.Ping, to *net.UDPAddr, hash []byte) { - if !reflect.DeepEqual(p.From, test.udp.ourEndpoint()) { + test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { + wantFrom := test.udp.ourEndpoint() + wantFrom.IP = net.IP{} + if !reflect.DeepEqual(p.From, wantFrom) { t.Errorf("got ping.From %#v, want %#v", p.From, test.udp.ourEndpoint()) } - wantTo := v4wire.Endpoint{ - // The mirrored UDP address is the UDP packet sender. - IP: test.remoteaddr.IP, - UDP: uint16(test.remoteaddr.Port), - TCP: 0, - } + // The mirrored UDP address is the UDP packet sender. 
+ wantTo := v4wire.NewEndpoint(test.remoteaddr, 0) if !reflect.DeepEqual(p.To, wantTo) { t.Errorf("got ping.To %v, want %v", p.To, wantTo) } @@ -442,11 +437,11 @@ func TestUDPv4_successfulPing(t *testing.T) { if n.ID() != rid { t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid) } - if !n.IP().Equal(test.remoteaddr.IP) { - t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.IP) + if !n.IP().Equal(test.remoteaddr.Addr().AsSlice()) { + t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.Addr()) } - if n.UDP() != test.remoteaddr.Port { - t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port) + if n.UDP() != int(test.remoteaddr.Port()) { + t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port()) } if n.TCP() != int(testRemote.TCP) { t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP(), testRemote.TCP) @@ -469,12 +464,12 @@ func TestUDPv4_EIP868(t *testing.T) { // Perform endpoint proof and check for sequence number in packet tail. test.packetIn(nil, &v4wire.Ping{Expiration: futureExp}) - test.waitPacketOut(func(p *v4wire.Pong, addr *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Pong, addr netip.AddrPort, hash []byte) { if p.ENRSeq != wantNode.Seq() { t.Errorf("wrong sequence number in pong: %d, want %d", p.ENRSeq, wantNode.Seq()) } }) - test.waitPacketOut(func(p *v4wire.Ping, addr *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Ping, addr netip.AddrPort, hash []byte) { if p.ENRSeq != wantNode.Seq() { t.Errorf("wrong sequence number in ping: %d, want %d", p.ENRSeq, wantNode.Seq()) } @@ -483,7 +478,7 @@ func TestUDPv4_EIP868(t *testing.T) { // Request should work now. 
test.packetIn(nil, &v4wire.ENRRequest{Expiration: futureExp}) - test.waitPacketOut(func(p *v4wire.ENRResponse, addr *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.ENRResponse, addr netip.AddrPort, hash []byte) { n, err := enode.New(enode.ValidSchemes, &p.Record) if err != nil { t.Fatalf("invalid record: %v", err) @@ -584,7 +579,7 @@ type dgramPipe struct { } type dgram struct { - to net.UDPAddr + to netip.AddrPort data []byte } @@ -597,8 +592,8 @@ func newpipe() *dgramPipe { } } -// WriteToUDP queues a datagram. -func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) { +// WriteToUDPAddrPort queues a datagram. +func (c *dgramPipe) WriteToUDPAddrPort(b []byte, to netip.AddrPort) (n int, err error) { msg := make([]byte, len(b)) copy(msg, b) c.mu.Lock() @@ -606,15 +601,15 @@ func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) { if c.closed { return 0, errors.New("closed") } - c.queue = append(c.queue, dgram{*to, b}) + c.queue = append(c.queue, dgram{to, b}) c.cond.Signal() return len(b), nil } -// ReadFromUDP just hangs until the pipe is closed. -func (c *dgramPipe) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { +// ReadFromUDPAddrPort just hangs until the pipe is closed. +func (c *dgramPipe) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { <-c.closing - return 0, nil, io.EOF + return 0, netip.AddrPort{}, io.EOF } func (c *dgramPipe) Close() error { diff --git a/p2p/discover/v4wire/v4wire.go b/p2p/discover/v4wire/v4wire.go index 9c59359fb2c2..958cca324d64 100644 --- a/p2p/discover/v4wire/v4wire.go +++ b/p2p/discover/v4wire/v4wire.go @@ -25,6 +25,7 @@ import ( "fmt" "math/big" "net" + "net/netip" "time" "github.com/ethereum/go-ethereum/common/math" @@ -150,14 +151,15 @@ type Endpoint struct { } // NewEndpoint creates an endpoint. 
-func NewEndpoint(addr *net.UDPAddr, tcpPort uint16) Endpoint { - ip := net.IP{} - if ip4 := addr.IP.To4(); ip4 != nil { - ip = ip4 - } else if ip6 := addr.IP.To16(); ip6 != nil { - ip = ip6 +func NewEndpoint(addr netip.AddrPort, tcpPort uint16) Endpoint { + var ip net.IP + if addr.Addr().Is4() || addr.Addr().Is4In6() { + ip4 := addr.Addr().As4() + ip = ip4[:] + } else { + ip = addr.Addr().AsSlice() } - return Endpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort} + return Endpoint{IP: ip, UDP: addr.Port(), TCP: tcpPort} } type Packet interface { diff --git a/p2p/discover/v5_talk.go b/p2p/discover/v5_talk.go index c1f67879402c..2246b47141c0 100644 --- a/p2p/discover/v5_talk.go +++ b/p2p/discover/v5_talk.go @@ -18,6 +18,7 @@ package discover import ( "net" + "net/netip" "sync" "time" @@ -70,7 +71,7 @@ func (t *talkSystem) register(protocol string, handler TalkRequestHandler) { } // handleRequest handles a talk request. -func (t *talkSystem) handleRequest(id enode.ID, addr *net.UDPAddr, req *v5wire.TalkRequest) { +func (t *talkSystem) handleRequest(id enode.ID, addr netip.AddrPort, req *v5wire.TalkRequest) { t.mutex.Lock() handler, ok := t.handlers[req.Protocol] t.mutex.Unlock() @@ -88,7 +89,8 @@ func (t *talkSystem) handleRequest(id enode.ID, addr *net.UDPAddr, req *v5wire.T case <-t.slots: go func() { defer func() { t.slots <- struct{}{} }() - respMessage := handler(id, addr, req.Message) + udpAddr := &net.UDPAddr{IP: addr.Addr().AsSlice(), Port: int(addr.Port())} + respMessage := handler(id, udpAddr, req.Message) resp := &v5wire.TalkResponse{ReqID: req.ReqID, Message: respMessage} t.transport.sendFromAnotherThread(id, addr, resp) }() diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 20a8bccd058e..9ba54b3d400a 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "net" + "net/netip" "slices" "sync" "time" @@ -101,14 +102,14 @@ type UDPv5 struct { type sendRequest struct { destID enode.ID - destAddr 
*net.UDPAddr + destAddr netip.AddrPort msg v5wire.Packet } // callV5 represents a remote procedure call against another node. type callV5 struct { id enode.ID - addr *net.UDPAddr + addr netip.AddrPort node *enode.Node // This is required to perform handshakes. packet v5wire.Packet @@ -175,7 +176,7 @@ func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { cancelCloseCtx: cancelCloseCtx, } t.talk = newTalkSystem(t) - tab, err := newMeteredTable(t, t.db, cfg) + tab, err := newTable(t, t.db, cfg) if err != nil { return nil, err } @@ -233,7 +234,7 @@ func (t *UDPv5) AllNodes() []*enode.Node { for _, b := range &t.tab.buckets { for _, n := range b.entries { - nodes = append(nodes, unwrapNode(n)) + nodes = append(nodes, n.Node) } } return nodes @@ -266,7 +267,7 @@ func (t *UDPv5) TalkRequest(n *enode.Node, protocol string, request []byte) ([]b } // TalkRequestToID sends a talk request to a node and waits for a response. -func (t *UDPv5) TalkRequestToID(id enode.ID, addr *net.UDPAddr, protocol string, request []byte) ([]byte, error) { +func (t *UDPv5) TalkRequestToID(id enode.ID, addr netip.AddrPort, protocol string, request []byte) ([]byte, error) { req := &v5wire.TalkRequest{Protocol: protocol, Message: request} resp := t.callToID(id, addr, v5wire.TalkResponseMsg, req) defer t.callDone(resp) @@ -314,26 +315,26 @@ func (t *UDPv5) newRandomLookup(ctx context.Context) *lookup { } func (t *UDPv5) newLookup(ctx context.Context, target enode.ID) *lookup { - return newLookup(ctx, t.tab, target, func(n *node) ([]*node, error) { + return newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { return t.lookupWorker(n, target) }) } // lookupWorker performs FINDNODE calls against a single node during lookup. 
-func (t *UDPv5) lookupWorker(destNode *node, target enode.ID) ([]*node, error) { +func (t *UDPv5) lookupWorker(destNode *enode.Node, target enode.ID) ([]*enode.Node, error) { var ( dists = lookupDistances(target, destNode.ID()) nodes = nodesByDistance{target: target} err error ) var r []*enode.Node - r, err = t.findnode(unwrapNode(destNode), dists) + r, err = t.findnode(destNode, dists) if errors.Is(err, errClosed) { return nil, err } for _, n := range r { if n.ID() != t.Self().ID() { - nodes.push(wrapNode(n), findnodeResultLimit) + nodes.push(n, findnodeResultLimit) } } return nodes.entries, err @@ -427,7 +428,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s if err != nil { return nil, err } - if err := netutil.CheckRelayIP(c.addr.IP, node.IP()); err != nil { + if err := netutil.CheckRelayIP(c.addr.Addr().AsSlice(), node.IP()); err != nil { return nil, err } if t.netrestrict != nil && !t.netrestrict.Contains(node.IP()) { @@ -452,14 +453,14 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s // callToNode sends the given call and sets up a handler for response packets (of message // type responseType). Responses are dispatched to the call's response channel. func (t *UDPv5) callToNode(n *enode.Node, responseType byte, req v5wire.Packet) *callV5 { - addr := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} + addr, _ := n.UDPEndpoint() c := &callV5{id: n.ID(), addr: addr, node: n} t.initCall(c, responseType, req) return c } // callToID is like callToNode, but for cases where the node record is not available. -func (t *UDPv5) callToID(id enode.ID, addr *net.UDPAddr, responseType byte, req v5wire.Packet) *callV5 { +func (t *UDPv5) callToID(id enode.ID, addr netip.AddrPort, responseType byte, req v5wire.Packet) *callV5 { c := &callV5{id: id, addr: addr} t.initCall(c, responseType, req) return c @@ -619,12 +620,12 @@ func (t *UDPv5) sendCall(c *callV5) { // sendResponse sends a response packet to the given node. 
// This doesn't trigger a handshake even if no keys are available. -func (t *UDPv5) sendResponse(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet) error { +func (t *UDPv5) sendResponse(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) error { _, err := t.send(toID, toAddr, packet, nil) return err } -func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet) { +func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) { select { case t.sendCh <- sendRequest{toID, toAddr, packet}: case <-t.closeCtx.Done(): @@ -632,7 +633,7 @@ func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr *net.UDPAddr, packet } // send sends a packet to the given node. -func (t *UDPv5) send(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet, c *v5wire.Whoareyou) (v5wire.Nonce, error) { +func (t *UDPv5) send(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet, c *v5wire.Whoareyou) (v5wire.Nonce, error) { addr := toAddr.String() t.logcontext = append(t.logcontext[:0], "id", toID, "addr", addr) t.logcontext = packet.AppendLogInfo(t.logcontext) @@ -644,7 +645,7 @@ func (t *UDPv5) send(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet, c return nonce, err } - _, err = t.conn.WriteToUDP(enc, toAddr) + _, err = t.conn.WriteToUDPAddrPort(enc, toAddr) t.log.Trace(">> "+packet.Name(), t.logcontext...) return nonce, err } @@ -655,7 +656,7 @@ func (t *UDPv5) readLoop() { buf := make([]byte, maxPacketSize) for range t.readNextCh { - nbytes, from, err := t.conn.ReadFromUDP(buf) + nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) if netutil.IsTemporaryError(err) { // Ignore temporary read errors. t.log.Debug("Temporary UDP read error", "err", err) @@ -672,7 +673,7 @@ func (t *UDPv5) readLoop() { } // dispatchReadPacket sends a packet into the dispatch loop. 
-func (t *UDPv5) dispatchReadPacket(from *net.UDPAddr, content []byte) bool { +func (t *UDPv5) dispatchReadPacket(from netip.AddrPort, content []byte) bool { select { case t.packetInCh <- ReadPacket{content, from}: return true @@ -682,7 +683,7 @@ func (t *UDPv5) dispatchReadPacket(from *net.UDPAddr, content []byte) bool { } // handlePacket decodes and processes an incoming packet from the network. -func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { +func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr netip.AddrPort) error { addr := fromAddr.String() fromID, fromNode, packet, err := t.codec.Decode(rawpacket, addr) if err != nil { @@ -699,7 +700,7 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { } if fromNode != nil { // Handshake succeeded, add to table. - t.tab.addSeenNode(wrapNode(fromNode)) + t.tab.addInboundNode(fromNode) } if packet.Kind() != v5wire.WhoareyouPacket { // WHOAREYOU logged separately to report errors. @@ -712,13 +713,13 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { } // handleCallResponse dispatches a response packet to the call waiting for it. -func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr *net.UDPAddr, p v5wire.Packet) bool { +func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr netip.AddrPort, p v5wire.Packet) bool { ac := t.activeCallByNode[fromID] if ac == nil || !bytes.Equal(p.RequestID(), ac.reqid) { t.log.Debug(fmt.Sprintf("Unsolicited/late %s response", p.Name()), "id", fromID, "addr", fromAddr) return false } - if !fromAddr.IP.Equal(ac.addr.IP) || fromAddr.Port != ac.addr.Port { + if fromAddr != ac.addr { t.log.Debug(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr) return false } @@ -743,7 +744,7 @@ func (t *UDPv5) getNode(id enode.ID) *enode.Node { } // handle processes incoming packets according to their message type. 
-func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr) { +func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr netip.AddrPort) { switch p := p.(type) { case *v5wire.Unknown: t.handleUnknown(p, fromID, fromAddr) @@ -753,7 +754,9 @@ func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr) t.handlePing(p, fromID, fromAddr) case *v5wire.Pong: if t.handleCallResponse(fromID, fromAddr, p) { - t.localNode.UDPEndpointStatement(fromAddr, &net.UDPAddr{IP: p.ToIP, Port: int(p.ToPort)}) + fromUDPAddr := &net.UDPAddr{IP: fromAddr.Addr().AsSlice(), Port: int(fromAddr.Port())} + toUDPAddr := &net.UDPAddr{IP: p.ToIP, Port: int(p.ToPort)} + t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr) } case *v5wire.Findnode: t.handleFindnode(p, fromID, fromAddr) @@ -767,7 +770,7 @@ func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr) } // handleUnknown initiates a handshake by responding with WHOAREYOU. -func (t *UDPv5) handleUnknown(p *v5wire.Unknown, fromID enode.ID, fromAddr *net.UDPAddr) { +func (t *UDPv5) handleUnknown(p *v5wire.Unknown, fromID enode.ID, fromAddr netip.AddrPort) { challenge := &v5wire.Whoareyou{Nonce: p.Nonce} crand.Read(challenge.IDNonce[:]) if n := t.getNode(fromID); n != nil { @@ -783,7 +786,7 @@ var ( ) // handleWhoareyou resends the active call as a handshake packet. -func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr *net.UDPAddr) { +func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr netip.AddrPort) { c, err := t.matchWithCall(fromID, p.Nonce) if err != nil { t.log.Debug("Invalid "+p.Name(), "addr", fromAddr, "err", err) @@ -817,32 +820,35 @@ func (t *UDPv5) matchWithCall(fromID enode.ID, nonce v5wire.Nonce) (*callV5, err } // handlePing sends a PONG response. 
-func (t *UDPv5) handlePing(p *v5wire.Ping, fromID enode.ID, fromAddr *net.UDPAddr) { - remoteIP := fromAddr.IP - // Handle IPv4 mapped IPv6 addresses in the - // event the local node is binded to an - // ipv6 interface. - if remoteIP.To4() != nil { - remoteIP = remoteIP.To4() +func (t *UDPv5) handlePing(p *v5wire.Ping, fromID enode.ID, fromAddr netip.AddrPort) { + var remoteIP net.IP + // Handle IPv4 mapped IPv6 addresses in the event the local node is binded + // to an ipv6 interface. + if fromAddr.Addr().Is4() || fromAddr.Addr().Is4In6() { + ip4 := fromAddr.Addr().As4() + remoteIP = ip4[:] + } else { + remoteIP = fromAddr.Addr().AsSlice() } t.sendResponse(fromID, fromAddr, &v5wire.Pong{ ReqID: p.ReqID, ToIP: remoteIP, - ToPort: uint16(fromAddr.Port), + ToPort: fromAddr.Port(), ENRSeq: t.localNode.Node().Seq(), }) } // handleFindnode returns nodes to the requester. -func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr *net.UDPAddr) { - nodes := t.collectTableNodes(fromAddr.IP, p.Distances, findnodeResultLimit) +func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr netip.AddrPort) { + nodes := t.collectTableNodes(fromAddr.Addr(), p.Distances, findnodeResultLimit) for _, resp := range packNodes(p.ReqID, nodes) { t.sendResponse(fromID, fromAddr, resp) } } // collectTableNodes creates a FINDNODE result set for the given distances. -func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*enode.Node { +func (t *UDPv5) collectTableNodes(rip netip.Addr, distances []uint, limit int) []*enode.Node { + ripSlice := rip.AsSlice() var bn []*enode.Node var nodes []*enode.Node var processed = make(map[uint]struct{}) @@ -857,7 +863,7 @@ func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*en for _, n := range t.tab.appendLiveNodes(dist, bn[:0]) { // Apply some pre-checks to avoid sending invalid nodes. // Note liveness is checked by appendLiveNodes. 
- if netutil.CheckRelayIP(rip, n.IP()) != nil { + if netutil.CheckRelayIP(ripSlice, n.IP()) != nil { continue } nodes = append(nodes, n) diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 4373ea81847f..1f8e972200ae 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math/rand" "net" + "net/netip" "reflect" "slices" "testing" @@ -103,7 +104,7 @@ func TestUDPv5_pingHandling(t *testing.T) { defer test.close() test.packetIn(&v5wire.Ping{ReqID: []byte("foo")}) - test.waitPacketOut(func(p *v5wire.Pong, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("foo")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -135,16 +136,16 @@ func TestUDPv5_unknownPacket(t *testing.T) { // Unknown packet from unknown node. test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { check(p, 0) }) // Make node known. 
n := test.getNode(test.remotekey, test.remoteaddr).Node() - test.table.addSeenNode(wrapNode(n)) + test.table.addFoundNode(n, false) test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { check(p, n.Seq()) }) } @@ -159,9 +160,9 @@ func TestUDPv5_findnodeHandling(t *testing.T) { nodes253 := nodesAtDistance(test.table.self().ID(), 253, 16) nodes249 := nodesAtDistance(test.table.self().ID(), 249, 4) nodes248 := nodesAtDistance(test.table.self().ID(), 248, 10) - fillTable(test.table, wrapNodes(nodes253), true) - fillTable(test.table, wrapNodes(nodes249), true) - fillTable(test.table, wrapNodes(nodes248), true) + fillTable(test.table, nodes253, true) + fillTable(test.table, nodes249, true) + fillTable(test.table, nodes248, true) // Requesting with distance zero should return the node's own record. test.packetIn(&v5wire.Findnode{ReqID: []byte{0}, Distances: []uint{0}}) @@ -199,7 +200,7 @@ func (test *udpV5Test) expectNodes(wantReqID []byte, wantTotal uint8, wantNodes } for { - test.waitPacketOut(func(p *v5wire.Nodes, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Nodes, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, wantReqID) { test.t.Fatalf("wrong request ID %v in response, want %v", p.ReqID, wantReqID) } @@ -238,7 +239,7 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) {}) + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) {}) if err := <-done; err != errTimeout { t.Fatalf("want errTimeout, got %q", err) } @@ -248,7 +249,7 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p 
*v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetInFrom(test.remotekey, test.remoteaddr, &v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != nil { @@ -260,8 +261,8 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { - wrongAddr := &net.UDPAddr{IP: net.IP{33, 44, 55, 22}, Port: 10101} + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { + wrongAddr := netip.MustParseAddrPort("33.44.55.22:10101") test.packetInFrom(test.remotekey, wrongAddr, &v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != errTimeout { @@ -291,7 +292,7 @@ func TestUDPv5_findnodeCall(t *testing.T) { }() // Serve the responses: - test.waitPacketOut(func(p *v5wire.Findnode, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { if !reflect.DeepEqual(p.Distances, distances) { t.Fatalf("wrong distances in request: %v", p.Distances) } @@ -337,15 +338,15 @@ func TestUDPv5_callResend(t *testing.T) { }() // Ping answered by WHOAREYOU. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) // Ping should be re-sent. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) }) // Answer the other ping. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != nil { @@ -370,11 +371,11 @@ func TestUDPv5_multipleHandshakeRounds(t *testing.T) { }() // Ping answered by WHOAREYOU. 
- test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) // Ping answered by WHOAREYOU again. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) if err := <-done; err != errTimeout { @@ -401,7 +402,7 @@ func TestUDPv5_callTimeoutReset(t *testing.T) { }() // Serve two responses, slowly. - test.waitPacketOut(func(p *v5wire.Findnode, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { time.Sleep(respTimeout - 50*time.Millisecond) test.packetIn(&v5wire.Nodes{ ReqID: p.ReqID, @@ -439,7 +440,7 @@ func TestUDPv5_talkHandling(t *testing.T) { Protocol: "test", Message: []byte("test request"), }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("foo")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -458,7 +459,7 @@ func TestUDPv5_talkHandling(t *testing.T) { Protocol: "wrong", Message: []byte("test request"), }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("2")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -485,7 +486,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) {}) + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) {}) if err := <-done; err 
!= errTimeout { t.Fatalf("want errTimeout, got %q", err) } @@ -495,7 +496,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { if p.Protocol != "test" { t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) } @@ -516,7 +517,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequestToID(remote.ID(), test.remoteaddr, "test", []byte("test request 2")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { if p.Protocol != "test" { t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) } @@ -583,13 +584,14 @@ func TestUDPv5_lookup(t *testing.T) { for d, nn := range lookupTestnet.dists { for i, key := range nn { n := lookupTestnet.node(d, i) - test.getNode(key, &net.UDPAddr{IP: n.IP(), Port: n.UDP()}) + addr, _ := n.UDPEndpoint() + test.getNode(key, addr) } } // Seed table with initial node. initialNode := lookupTestnet.node(256, 0) - fillTable(test.table, []*node{wrapNode(initialNode)}, true) + fillTable(test.table, []*enode.Node{initialNode}, true) // Start the lookup. resultC := make(chan []*enode.Node, 1) @@ -601,7 +603,7 @@ func TestUDPv5_lookup(t *testing.T) { // Answer lookup packets. 
asked := make(map[enode.ID]bool) for done := false; !done; { - done = test.waitPacketOut(func(p v5wire.Packet, to *net.UDPAddr, _ v5wire.Nonce) { + done = test.waitPacketOut(func(p v5wire.Packet, to netip.AddrPort, _ v5wire.Nonce) { recipient, key := lookupTestnet.nodeByAddr(to) switch p := p.(type) { case *v5wire.Ping: @@ -652,11 +654,8 @@ func TestUDPv5_PingWithIPV4MappedAddress(t *testing.T) { test := newUDPV5Test(t) defer test.close() - rawIP := net.IPv4(0xFF, 0x12, 0x33, 0xE5) - test.remoteaddr = &net.UDPAddr{ - IP: rawIP.To16(), - Port: 0, - } + rawIP := netip.AddrFrom4([4]byte{0xFF, 0x12, 0x33, 0xE5}) + test.remoteaddr = netip.AddrPortFrom(netip.AddrFrom16(rawIP.As16()), 0) remote := test.getNode(test.remotekey, test.remoteaddr).Node() done := make(chan struct{}, 1) @@ -665,14 +664,14 @@ func TestUDPv5_PingWithIPV4MappedAddress(t *testing.T) { test.udp.handlePing(&v5wire.Ping{ENRSeq: 1}, remote.ID(), test.remoteaddr) done <- struct{}{} }() - test.waitPacketOut(func(p *v5wire.Pong, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { if len(p.ToIP) == net.IPv6len { t.Error("Received untruncated ip address") } if len(p.ToIP) != net.IPv4len { t.Errorf("Received ip address with incorrect length: %d", len(p.ToIP)) } - if !p.ToIP.Equal(rawIP) { + if !p.ToIP.Equal(rawIP.AsSlice()) { t.Errorf("Received incorrect ip address: wanted %s but received %s", rawIP.String(), p.ToIP.String()) } }) @@ -688,9 +687,9 @@ type udpV5Test struct { db *enode.DB udp *UDPv5 localkey, remotekey *ecdsa.PrivateKey - remoteaddr *net.UDPAddr + remoteaddr netip.AddrPort nodesByID map[enode.ID]*enode.LocalNode - nodesByIP map[string]*enode.LocalNode + nodesByIP map[netip.Addr]*enode.LocalNode } // testCodec is the packet encoding used by protocol tests. This codec does not perform encryption. 
@@ -750,9 +749,9 @@ func newUDPV5Test(t *testing.T) *udpV5Test { pipe: newpipe(), localkey: newkey(), remotekey: newkey(), - remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303}, + remoteaddr: netip.MustParseAddrPort("10.0.1.99:30303"), nodesByID: make(map[enode.ID]*enode.LocalNode), - nodesByIP: make(map[string]*enode.LocalNode), + nodesByIP: make(map[netip.Addr]*enode.LocalNode), } test.db, _ = enode.OpenDB("") ln := enode.NewLocalNode(test.db, test.localkey) @@ -777,8 +776,8 @@ func (test *udpV5Test) packetIn(packet v5wire.Packet) { test.packetInFrom(test.remotekey, test.remoteaddr, packet) } -// handles a packet as if it had been sent to the transport by the key/endpoint. -func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr *net.UDPAddr, packet v5wire.Packet) { +// packetInFrom handles a packet as if it had been sent to the transport by the key/endpoint. +func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr netip.AddrPort, packet v5wire.Packet) { test.t.Helper() ln := test.getNode(key, addr) @@ -793,22 +792,22 @@ func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr *net.UDPAddr, pa } // getNode ensures the test knows about a node at the given endpoint. -func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr) *enode.LocalNode { +func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr netip.AddrPort) *enode.LocalNode { id := encodePubkey(&key.PublicKey).id() ln := test.nodesByID[id] if ln == nil { db, _ := enode.OpenDB("") ln = enode.NewLocalNode(db, key) - ln.SetStaticIP(addr.IP) - ln.Set(enr.UDP(addr.Port)) + ln.SetStaticIP(addr.Addr().AsSlice()) + ln.Set(enr.UDP(addr.Port())) test.nodesByID[id] = ln } - test.nodesByIP[string(addr.IP)] = ln + test.nodesByIP[addr.Addr()] = ln return ln } // waitPacketOut waits for the next output packet and handles it using the given 'validate' -// function. The function must be of type func (X, *net.UDPAddr, v5wire.Nonce) where X is +// function. 
The function must be of type func (X, netip.AddrPort, v5wire.Nonce) where X is // assignable to packetV5. func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Helper() @@ -824,7 +823,7 @@ func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Fatalf("timed out waiting for %v", exptype) return false } - ln := test.nodesByIP[string(dgram.to.IP)] + ln := test.nodesByIP[dgram.to.Addr()] if ln == nil { test.t.Fatalf("attempt to send to non-existing node %v", &dgram.to) return false @@ -839,7 +838,7 @@ func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) return false } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(&dgram.to), reflect.ValueOf(frame.AuthTag)}) + fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(frame.AuthTag)}) return false } diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index a5387311a5d0..27966f2afc6b 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -30,6 +30,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" @@ -283,9 +284,38 @@ func TestDecodeErrorsV5(t *testing.T) { b = make([]byte, 63) net.nodeA.expectDecodeErr(t, errInvalidHeader, b) - // TODO some more tests would be nice :) - // - check invalid authdata sizes - // - check invalid handshake data sizes + t.Run("invalid-handshake-datasize", func(t *testing.T) { + requiredNumber := 108 + + testDataFile := filepath.Join("testdata", "v5.1-ping-handshake"+".txt") + enc := hexFile(testDataFile) + //delete some byte from handshake to make it invalid + enc = enc[:len(enc)-requiredNumber] + net.nodeB.expectDecodeErr(t, errMsgTooShort, enc) + }) + + 
t.Run("invalid-auth-datasize", func(t *testing.T) { + testPacket := []byte{} + testDataFiles := []string{"v5.1-whoareyou", "v5.1-ping-handshake"} + for counter, name := range testDataFiles { + file := filepath.Join("testdata", name+".txt") + enc := hexFile(file) + if counter == 0 { + //make whoareyou header + testPacket = enc[:sizeofStaticPacketData-1] + testPacket = append(testPacket, 255) + } + if counter == 1 { + //append invalid auth size + testPacket = append(testPacket, enc[sizeofStaticPacketData:]...) + } + } + + wantErr := "invalid auth size" + if _, err := net.nodeB.decode(testPacket); strings.HasSuffix(err.Error(), wantErr) { + t.Fatal(fmt.Errorf("(%s) got err %q, want %q", net.nodeB.ln.ID().TerminalString(), err, wantErr)) + } + }) } // This test checks that all test vectors can be decoded. diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go index 6ad7f809a71d..db7841c047a1 100644 --- a/p2p/enode/idscheme.go +++ b/p2p/enode/idscheme.go @@ -157,5 +157,5 @@ func SignNull(r *enr.Record, id ID) *Node { if err := r.SetSig(NullID{}, []byte{}); err != nil { panic(err) } - return &Node{r: *r, id: id} + return newNodeWithID(r, id) } diff --git a/p2p/enode/node.go b/p2p/enode/node.go index d7a1a9a1561c..cb4ac8d1726c 100644 --- a/p2p/enode/node.go +++ b/p2p/enode/node.go @@ -24,6 +24,7 @@ import ( "fmt" "math/bits" "net" + "net/netip" "strings" "github.com/ethereum/go-ethereum/p2p/enr" @@ -36,6 +37,10 @@ var errMissingPrefix = errors.New("missing 'enr:' prefix for base64-encoded reco type Node struct { r enr.Record id ID + // endpoint information + ip netip.Addr + udp uint16 + tcp uint16 } // New wraps a node record. 
The record must be valid according to the given @@ -44,11 +49,76 @@ func New(validSchemes enr.IdentityScheme, r *enr.Record) (*Node, error) { if err := r.VerifySignature(validSchemes); err != nil { return nil, err } - node := &Node{r: *r} - if n := copy(node.id[:], validSchemes.NodeAddr(&node.r)); n != len(ID{}) { - return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(ID{})) + var id ID + if n := copy(id[:], validSchemes.NodeAddr(r)); n != len(id) { + return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(id)) + } + return newNodeWithID(r, id), nil +} + +func newNodeWithID(r *enr.Record, id ID) *Node { + n := &Node{r: *r, id: id} + // Set the preferred endpoint. + // Here we decide between IPv4 and IPv6, choosing the 'most global' address. + var ip4 netip.Addr + var ip6 netip.Addr + n.Load((*enr.IPv4Addr)(&ip4)) + n.Load((*enr.IPv6Addr)(&ip6)) + valid4 := validIP(ip4) + valid6 := validIP(ip6) + switch { + case valid4 && valid6: + if localityScore(ip4) >= localityScore(ip6) { + n.setIP4(ip4) + } else { + n.setIP6(ip6) + } + case valid4: + n.setIP4(ip4) + case valid6: + n.setIP6(ip6) + } + return n +} + +// validIP reports whether 'ip' is a valid node endpoint IP address. +func validIP(ip netip.Addr) bool { + return ip.IsValid() && !ip.IsMulticast() +} + +func localityScore(ip netip.Addr) int { + switch { + case ip.IsUnspecified(): + return 0 + case ip.IsLoopback(): + return 1 + case ip.IsLinkLocalUnicast(): + return 2 + case ip.IsPrivate(): + return 3 + default: + return 4 + } +} + +func (n *Node) setIP4(ip netip.Addr) { + n.ip = ip + n.Load((*enr.UDP)(&n.udp)) + n.Load((*enr.TCP)(&n.tcp)) +} + +func (n *Node) setIP6(ip netip.Addr) { + if ip.Is4In6() { + n.setIP4(ip) + return + } + n.ip = ip + if err := n.Load((*enr.UDP6)(&n.udp)); err != nil { + n.Load((*enr.UDP)(&n.udp)) + } + if err := n.Load((*enr.TCP6)(&n.tcp)); err != nil { + n.Load((*enr.TCP)(&n.tcp)) } - return node, nil } // MustParse parses a node record or enode:// URL. 
It panics if the input is invalid. @@ -89,43 +159,45 @@ func (n *Node) Seq() uint64 { return n.r.Seq() } -// Incomplete returns true for nodes with no IP address. -func (n *Node) Incomplete() bool { - return n.IP() == nil -} - // Load retrieves an entry from the underlying record. func (n *Node) Load(k enr.Entry) error { return n.r.Load(k) } -// IP returns the IP address of the node. This prefers IPv4 addresses. +// IP returns the IP address of the node. func (n *Node) IP() net.IP { - var ( - ip4 enr.IPv4 - ip6 enr.IPv6 - ) - if n.Load(&ip4) == nil { - return net.IP(ip4) - } - if n.Load(&ip6) == nil { - return net.IP(ip6) - } - return nil + return net.IP(n.ip.AsSlice()) +} + +// IPAddr returns the IP address of the node. +func (n *Node) IPAddr() netip.Addr { + return n.ip } // UDP returns the UDP port of the node. func (n *Node) UDP() int { - var port enr.UDP - n.Load(&port) - return int(port) + return int(n.udp) } // TCP returns the TCP port of the node. func (n *Node) TCP() int { - var port enr.TCP - n.Load(&port) - return int(port) + return int(n.tcp) +} + +// UDPEndpoint returns the announced UDP endpoint. +func (n *Node) UDPEndpoint() (netip.AddrPort, bool) { + if !n.ip.IsValid() || n.ip.IsUnspecified() || n.udp == 0 { + return netip.AddrPort{}, false + } + return netip.AddrPortFrom(n.ip, n.udp), true +} + +// TCPEndpoint returns the announced TCP endpoint. +func (n *Node) TCPEndpoint() (netip.AddrPort, bool) { + if !n.ip.IsValid() || n.ip.IsUnspecified() || n.tcp == 0 { + return netip.AddrPort{}, false + } + return netip.AddrPortFrom(n.ip, n.tcp), true } // Pubkey returns the secp256k1 public key of the node, if present. @@ -147,16 +219,15 @@ func (n *Node) Record() *enr.Record { // ValidateComplete checks whether n has a valid IP and UDP port. // Deprecated: don't use this method. 
func (n *Node) ValidateComplete() error { - if n.Incomplete() { + if !n.ip.IsValid() { return errors.New("missing IP address") } - if n.UDP() == 0 { - return errors.New("missing UDP port") - } - ip := n.IP() - if ip.IsMulticast() || ip.IsUnspecified() { + if n.ip.IsMulticast() || n.ip.IsUnspecified() { return errors.New("invalid IP (multicast/unspecified)") } + if n.udp == 0 { + return errors.New("missing UDP port") + } // Validate the node key (on curve, etc.). var key Secp256k1 return n.Load(&key) diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go index d15859c477a5..56e196e82e2d 100644 --- a/p2p/enode/node_test.go +++ b/p2p/enode/node_test.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "fmt" "math/big" + "net/netip" "testing" "testing/quick" @@ -64,6 +65,167 @@ func TestPythonInterop(t *testing.T) { } } +func TestNodeEndpoints(t *testing.T) { + id := HexID("00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") + type endpointTest struct { + name string + node *Node + wantIP netip.Addr + wantUDP int + wantTCP int + } + tests := []endpointTest{ + { + name: "no-addr", + node: func() *Node { + var r enr.Record + return SignNull(&r, id) + }(), + }, + { + name: "udp-only", + node: func() *Node { + var r enr.Record + r.Set(enr.UDP(9000)) + return SignNull(&r, id) + }(), + }, + { + name: "tcp-only", + node: func() *Node { + var r enr.Record + r.Set(enr.TCP(9000)) + return SignNull(&r, id) + }(), + }, + { + name: "ipv4-only-loopback", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("127.0.0.1"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("127.0.0.1"), + }, + { + name: "ipv4-only-unspecified", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("0.0.0.0"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("0.0.0.0"), + }, + { + name: "ipv4-only", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("99.22.33.1"))) + return 
SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("99.22.33.1"), + }, + { + name: "ipv6-only", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + }, + { + name: "ipv4-loopback-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("127.0.0.1"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-unspecified-and-ipv6-loopback", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("0.0.0.0"))) + r.Set(enr.IPv6Addr(netip.MustParseAddr("::1"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("::1"), + }, + { + name: "ipv4-private-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-local-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("169.254.2.6"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-private-and-ipv6-private", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("fd00::abcd:1"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: 
netip.MustParseAddr("192.168.2.2"), + wantUDP: 30304, + }, + { + name: "ipv4-private-and-ipv6-link-local", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("fe80::1"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("192.168.2.2"), + wantUDP: 30304, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.wantIP != test.node.IPAddr() { + t.Errorf("node has wrong IP %v, want %v", test.node.IPAddr(), test.wantIP) + } + if test.wantUDP != test.node.UDP() { + t.Errorf("node has wrong UDP port %d, want %d", test.node.UDP(), test.wantUDP) + } + if test.wantTCP != test.node.TCP() { + t.Errorf("node has wrong TCP port %d, want %d", test.node.TCP(), test.wantTCP) + } + }) + } +} + func TestHexID(t *testing.T) { ref := ID{0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188} id1 := HexID("0x00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 6d55ce17f130..654d71d47b6a 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -26,6 +26,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" @@ -242,13 +243,14 @@ func (db *DB) Node(id ID) *Node { } func mustDecodeNode(id, data []byte) *Node { - node := new(Node) - if err := rlp.DecodeBytes(data, &node.r); err != nil { + var r enr.Record + if err := rlp.DecodeBytes(data, &r); err != nil { panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err)) } - // Restore node id cache. 
- copy(node.id[:], id) - return node + if len(id) != len(ID{}) { + panic(fmt.Errorf("invalid id length %d", len(id))) + } + return newNodeWithID(&r, ID(id)) } // UpdateNode inserts - potentially overwriting - a node into the peer database. diff --git a/p2p/enode/urlv4.go b/p2p/enode/urlv4.go index 0272eee98725..a55dfa6632b3 100644 --- a/p2p/enode/urlv4.go +++ b/p2p/enode/urlv4.go @@ -181,7 +181,7 @@ func (n *Node) URLv4() string { nodeid = fmt.Sprintf("%s.%x", scheme, n.id[:]) } u := url.URL{Scheme: "enode"} - if n.Incomplete() { + if !n.ip.IsValid() { u.Host = nodeid } else { addr := net.TCPAddr{IP: n.IP(), Port: n.TCP()} diff --git a/p2p/enr/entries.go b/p2p/enr/entries.go index 9945a436c9f8..917e1becbaac 100644 --- a/p2p/enr/entries.go +++ b/p2p/enr/entries.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "net" + "net/netip" "github.com/ethereum/go-ethereum/rlp" ) @@ -167,6 +168,60 @@ func (v *IPv6) DecodeRLP(s *rlp.Stream) error { return nil } +// IPv4Addr is the "ip" key, which holds the IP address of the node. +type IPv4Addr netip.Addr + +func (v IPv4Addr) ENRKey() string { return "ip" } + +// EncodeRLP implements rlp.Encoder. +func (v IPv4Addr) EncodeRLP(w io.Writer) error { + addr := netip.Addr(v) + if !addr.Is4() { + return fmt.Errorf("address is not IPv4") + } + enc := rlp.NewEncoderBuffer(w) + bytes := addr.As4() + enc.WriteBytes(bytes[:]) + return enc.Flush() +} + +// DecodeRLP implements rlp.Decoder. +func (v *IPv4Addr) DecodeRLP(s *rlp.Stream) error { + var bytes [4]byte + if err := s.ReadBytes(bytes[:]); err != nil { + return err + } + *v = IPv4Addr(netip.AddrFrom4(bytes)) + return nil +} + +// IPv6Addr is the "ip6" key, which holds the IP address of the node. +type IPv6Addr netip.Addr + +func (v IPv6Addr) ENRKey() string { return "ip6" } + +// EncodeRLP implements rlp.Encoder. 
+func (v IPv6Addr) EncodeRLP(w io.Writer) error { + addr := netip.Addr(v) + if !addr.Is6() { + return fmt.Errorf("address is not IPv6") + } + enc := rlp.NewEncoderBuffer(w) + bytes := addr.As16() + enc.WriteBytes(bytes[:]) + return enc.Flush() +} + +// DecodeRLP implements rlp.Decoder. +func (v *IPv6Addr) DecodeRLP(s *rlp.Stream) error { + var bytes [16]byte + if err := s.ReadBytes(bytes[:]); err != nil { + return err + } + *v = IPv6Addr(netip.AddrFrom16(bytes)) + return nil +} + // KeyError is an error related to a key. type KeyError struct { Key string diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go deleted file mode 100644 index 8052144465a5..000000000000 --- a/p2p/nodestate/nodestate.go +++ /dev/null @@ -1,1023 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package nodestate - -import ( - "errors" - "reflect" - "sync" - "time" - "unsafe" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/rlp" -) - -var ( - ErrInvalidField = errors.New("invalid field type") - ErrClosed = errors.New("already closed") -) - -type ( - // NodeStateMachine implements a network node-related event subscription system. - // It can assign binary state flags and fields of arbitrary type to each node and allows - // subscriptions to flag/field changes which can also modify further flags and fields, - // potentially triggering further subscriptions. An operation includes an initial change - // and all resulting subsequent changes and always ends in a consistent global state. - // It is initiated by a "top level" SetState/SetField call that blocks (also blocking other - // top-level functions) until the operation is finished. Callbacks making further changes - // should use the non-blocking SetStateSub/SetFieldSub functions. The tree of events - // resulting from the initial changes is traversed in a breadth-first order, ensuring for - // each subscription callback that all other callbacks caused by the same change triggering - // the current callback are processed before anything is triggered by the changes made in the - // current callback. In practice this logic ensures that all subscriptions "see" events in - // the logical order, callbacks are never called concurrently and "back and forth" effects - // are also possible. The state machine design should ensure that infinite event cycles - // cannot happen. - // The caller can also add timeouts assigned to a certain node and a subset of state flags. - // If the timeout elapses, the flags are reset. 
If all relevant flags are reset then the timer - // is dropped. State flags with no timeout are persisted in the database if the flag - // descriptor enables saving. If a node has no state flags set at any moment then it is discarded. - // Note: in order to avoid mutex deadlocks the callbacks should never lock a mutex that - // might be locked when the top level SetState/SetField functions are called. If a function - // potentially performs state/field changes then it is recommended to mention this fact in the - // function description, along with whether it should run inside an operation callback. - NodeStateMachine struct { - started, closed bool - lock sync.Mutex - clock mclock.Clock - db ethdb.KeyValueStore - dbNodeKey []byte - nodes map[enode.ID]*nodeInfo - offlineCallbackList []offlineCallback - opFlag bool // an operation has started - opWait *sync.Cond // signaled when the operation ends - opPending []func() // pending callback list of the current operation - - // Registered state flags or fields. Modifications are allowed - // only when the node state machine has not been started. - setup *Setup - fields []*fieldInfo - saveFlags bitMask - - // Installed callbacks. Modifications are allowed only when the - // node state machine has not been started. - stateSubs []stateSub - - // Testing hooks, only for testing purposes. - saveNodeHook func(*nodeInfo) - } - - // Flags represents a set of flags from a certain setup - Flags struct { - mask bitMask - setup *Setup - } - - // Field represents a field from a certain setup - Field struct { - index int - setup *Setup - } - - // flagDefinition describes a node state flag. Each registered instance is automatically - // mapped to a bit of the 64 bit node states. - // If persistent is true then the node is saved when state machine is shutdown. - flagDefinition struct { - name string - persistent bool - } - - // fieldDefinition describes an optional node field of the given type. 
The contents - // of the field are only retained for each node as long as at least one of the - // state flags is set. - fieldDefinition struct { - name string - ftype reflect.Type - encode func(interface{}) ([]byte, error) - decode func([]byte) (interface{}, error) - } - - // Setup contains the list of flags and fields used by the application - Setup struct { - Version uint - flags []flagDefinition - fields []fieldDefinition - } - - // bitMask describes a node state or state mask. It represents a subset - // of node flags with each bit assigned to a flag index (LSB represents flag 0). - bitMask uint64 - - // StateCallback is a subscription callback which is called when one of the - // state flags that is included in the subscription state mask is changed. - // Note: oldState and newState are also masked with the subscription mask so only - // the relevant bits are included. - StateCallback func(n *enode.Node, oldState, newState Flags) - - // FieldCallback is a subscription callback which is called when the value of - // a specific field is changed. - FieldCallback func(n *enode.Node, state Flags, oldValue, newValue interface{}) - - // nodeInfo contains node state, fields and state timeouts - nodeInfo struct { - node *enode.Node - state bitMask - timeouts []*nodeStateTimeout - fields []interface{} - fieldCount int - db, dirty bool - } - - nodeInfoEnc struct { - Enr enr.Record - Version uint - State bitMask - Fields [][]byte - } - - stateSub struct { - mask bitMask - callback StateCallback - } - - nodeStateTimeout struct { - mask bitMask - timer mclock.Timer - } - - fieldInfo struct { - fieldDefinition - subs []FieldCallback - } - - offlineCallback struct { - node *nodeInfo - state bitMask - fields []interface{} - } -) - -// offlineState is a special state that is assumed to be set before a node is loaded from -// the database and after it is shut down. 
-const offlineState = bitMask(1) - -// NewFlag creates a new node state flag -func (s *Setup) NewFlag(name string) Flags { - if s.flags == nil { - s.flags = []flagDefinition{{name: "offline"}} - } - f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s} - s.flags = append(s.flags, flagDefinition{name: name}) - return f -} - -// NewPersistentFlag creates a new persistent node state flag -func (s *Setup) NewPersistentFlag(name string) Flags { - if s.flags == nil { - s.flags = []flagDefinition{{name: "offline"}} - } - f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s} - s.flags = append(s.flags, flagDefinition{name: name, persistent: true}) - return f -} - -// OfflineFlag returns the system-defined offline flag belonging to the given setup -func (s *Setup) OfflineFlag() Flags { - return Flags{mask: offlineState, setup: s} -} - -// NewField creates a new node state field -func (s *Setup) NewField(name string, ftype reflect.Type) Field { - f := Field{index: len(s.fields), setup: s} - s.fields = append(s.fields, fieldDefinition{ - name: name, - ftype: ftype, - }) - return f -} - -// NewPersistentField creates a new persistent node field -func (s *Setup) NewPersistentField(name string, ftype reflect.Type, encode func(interface{}) ([]byte, error), decode func([]byte) (interface{}, error)) Field { - f := Field{index: len(s.fields), setup: s} - s.fields = append(s.fields, fieldDefinition{ - name: name, - ftype: ftype, - encode: encode, - decode: decode, - }) - return f -} - -// flagOp implements binary flag operations and also checks whether the operands belong to the same setup -func flagOp(a, b Flags, trueIfA, trueIfB, trueIfBoth bool) Flags { - if a.setup == nil { - if a.mask != 0 { - panic("Node state flags have no setup reference") - } - a.setup = b.setup - } - if b.setup == nil { - if b.mask != 0 { - panic("Node state flags have no setup reference") - } - b.setup = a.setup - } - if a.setup != b.setup { - panic("Node state flags belong to a different 
setup") - } - res := Flags{setup: a.setup} - if trueIfA { - res.mask |= a.mask & ^b.mask - } - if trueIfB { - res.mask |= b.mask & ^a.mask - } - if trueIfBoth { - res.mask |= a.mask & b.mask - } - return res -} - -// And returns the set of flags present in both a and b -func (a Flags) And(b Flags) Flags { return flagOp(a, b, false, false, true) } - -// AndNot returns the set of flags present in a but not in b -func (a Flags) AndNot(b Flags) Flags { return flagOp(a, b, true, false, false) } - -// Or returns the set of flags present in either a or b -func (a Flags) Or(b Flags) Flags { return flagOp(a, b, true, true, true) } - -// Xor returns the set of flags present in either a or b but not both -func (a Flags) Xor(b Flags) Flags { return flagOp(a, b, true, true, false) } - -// HasAll returns true if b is a subset of a -func (a Flags) HasAll(b Flags) bool { return flagOp(a, b, false, true, false).mask == 0 } - -// HasNone returns true if a and b have no shared flags -func (a Flags) HasNone(b Flags) bool { return flagOp(a, b, false, false, true).mask == 0 } - -// Equals returns true if a and b have the same flags set -func (a Flags) Equals(b Flags) bool { return flagOp(a, b, true, true, false).mask == 0 } - -// IsEmpty returns true if a has no flags set -func (a Flags) IsEmpty() bool { return a.mask == 0 } - -// MergeFlags merges multiple sets of state flags -func MergeFlags(list ...Flags) Flags { - if len(list) == 0 { - return Flags{} - } - res := list[0] - for i := 1; i < len(list); i++ { - res = res.Or(list[i]) - } - return res -} - -// String returns a list of the names of the flags specified in the bit mask -func (f Flags) String() string { - if f.mask == 0 { - return "[]" - } - s := "[" - comma := false - for index, flag := range f.setup.flags { - if f.mask&(bitMask(1)< 8*int(unsafe.Sizeof(bitMask(0))) { - panic("Too many node state flags") - } - ns := &NodeStateMachine{ - db: db, - dbNodeKey: dbKey, - clock: clock, - setup: setup, - nodes: 
make(map[enode.ID]*nodeInfo), - fields: make([]*fieldInfo, len(setup.fields)), - } - ns.opWait = sync.NewCond(&ns.lock) - stateNameMap := make(map[string]int, len(setup.flags)) - for index, flag := range setup.flags { - if _, ok := stateNameMap[flag.name]; ok { - panic("Node state flag name collision: " + flag.name) - } - stateNameMap[flag.name] = index - if flag.persistent { - ns.saveFlags |= bitMask(1) << uint(index) - } - } - fieldNameMap := make(map[string]int, len(setup.fields)) - for index, field := range setup.fields { - if _, ok := fieldNameMap[field.name]; ok { - panic("Node field name collision: " + field.name) - } - ns.fields[index] = &fieldInfo{fieldDefinition: field} - fieldNameMap[field.name] = index - } - return ns -} - -// stateMask checks whether the set of flags belongs to the same setup and returns its internal bit mask -func (ns *NodeStateMachine) stateMask(flags Flags) bitMask { - if flags.setup != ns.setup && flags.mask != 0 { - panic("Node state flags belong to a different setup") - } - return flags.mask -} - -// fieldIndex checks whether the field belongs to the same setup and returns its internal index -func (ns *NodeStateMachine) fieldIndex(field Field) int { - if field.setup != ns.setup { - panic("Node field belongs to a different setup") - } - return field.index -} - -// SubscribeState adds a node state subscription. The callback is called while the state -// machine mutex is not held and it is allowed to make further state updates using the -// non-blocking SetStateSub/SetFieldSub functions. All callbacks of an operation are running -// from the thread/goroutine of the initial caller and parallel operations are not permitted. -// Therefore the callback is never called concurrently. It is the responsibility of the -// implemented state logic to avoid deadlocks and to reach a stable state in a finite amount -// of steps. -// State subscriptions should be installed before loading the node database or making the -// first state update. 
-func (ns *NodeStateMachine) SubscribeState(flags Flags, callback StateCallback) { - ns.lock.Lock() - defer ns.lock.Unlock() - - if ns.started { - panic("state machine already started") - } - ns.stateSubs = append(ns.stateSubs, stateSub{ns.stateMask(flags), callback}) -} - -// SubscribeField adds a node field subscription. Same rules apply as for SubscribeState. -func (ns *NodeStateMachine) SubscribeField(field Field, callback FieldCallback) { - ns.lock.Lock() - defer ns.lock.Unlock() - - if ns.started { - panic("state machine already started") - } - f := ns.fields[ns.fieldIndex(field)] - f.subs = append(f.subs, callback) -} - -// newNode creates a new nodeInfo -func (ns *NodeStateMachine) newNode(n *enode.Node) *nodeInfo { - return &nodeInfo{node: n, fields: make([]interface{}, len(ns.fields))} -} - -// checkStarted checks whether the state machine has already been started and panics otherwise. -func (ns *NodeStateMachine) checkStarted() { - if !ns.started { - panic("state machine not started yet") - } -} - -// Start starts the state machine, enabling state and field operations and disabling -// further subscriptions. 
-func (ns *NodeStateMachine) Start() { - ns.lock.Lock() - if ns.started { - panic("state machine already started") - } - ns.started = true - if ns.db != nil { - ns.loadFromDb() - } - - ns.opStart() - ns.offlineCallbacks(true) - ns.opFinish() - ns.lock.Unlock() -} - -// Stop stops the state machine and saves its state if a database was supplied -func (ns *NodeStateMachine) Stop() { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if !ns.opStart() { - panic("already closed") - } - for _, node := range ns.nodes { - fields := make([]interface{}, len(node.fields)) - copy(fields, node.fields) - ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node, node.state, fields}) - } - if ns.db != nil { - ns.saveToDb() - } - ns.offlineCallbacks(false) - ns.closed = true - ns.opFinish() -} - -// loadFromDb loads persisted node states from the database -func (ns *NodeStateMachine) loadFromDb() { - it := ns.db.NewIterator(ns.dbNodeKey, nil) - for it.Next() { - var id enode.ID - if len(it.Key()) != len(ns.dbNodeKey)+len(id) { - log.Error("Node state db entry with invalid length", "found", len(it.Key()), "expected", len(ns.dbNodeKey)+len(id)) - continue - } - copy(id[:], it.Key()[len(ns.dbNodeKey):]) - ns.decodeNode(id, it.Value()) - } -} - -type dummyIdentity enode.ID - -func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil } -func (id dummyIdentity) NodeAddr(r *enr.Record) []byte { return id[:] } - -// decodeNode decodes a node database entry and adds it to the node set if successful -func (ns *NodeStateMachine) decodeNode(id enode.ID, data []byte) { - var enc nodeInfoEnc - if err := rlp.DecodeBytes(data, &enc); err != nil { - log.Error("Failed to decode node info", "id", id, "error", err) - return - } - n, _ := enode.New(dummyIdentity(id), &enc.Enr) - node := ns.newNode(n) - node.db = true - - if enc.Version != ns.setup.Version { - log.Debug("Removing stored node with unknown version", "current", ns.setup.Version, 
"stored", enc.Version) - ns.deleteNode(id) - return - } - if len(enc.Fields) > len(ns.setup.fields) { - log.Error("Invalid node field count", "id", id, "stored", len(enc.Fields)) - return - } - // Resolve persisted node fields - for i, encField := range enc.Fields { - if len(encField) == 0 { - continue - } - if decode := ns.fields[i].decode; decode != nil { - if field, err := decode(encField); err == nil { - node.fields[i] = field - node.fieldCount++ - } else { - log.Error("Failed to decode node field", "id", id, "field name", ns.fields[i].name, "error", err) - return - } - } else { - log.Error("Cannot decode node field", "id", id, "field name", ns.fields[i].name) - return - } - } - // It's a compatible node record, add it to set. - ns.nodes[id] = node - node.state = enc.State - fields := make([]interface{}, len(node.fields)) - copy(fields, node.fields) - ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node, node.state, fields}) - log.Debug("Loaded node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup}) -} - -// saveNode saves the given node info to the database -func (ns *NodeStateMachine) saveNode(id enode.ID, node *nodeInfo) error { - if ns.db == nil { - return nil - } - - storedState := node.state & ns.saveFlags - for _, t := range node.timeouts { - storedState &= ^t.mask - } - enc := nodeInfoEnc{ - Enr: *node.node.Record(), - Version: ns.setup.Version, - State: storedState, - Fields: make([][]byte, len(ns.fields)), - } - log.Debug("Saved node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup}) - lastIndex := -1 - for i, f := range node.fields { - if f == nil { - continue - } - encode := ns.fields[i].encode - if encode == nil { - continue - } - blob, err := encode(f) - if err != nil { - return err - } - enc.Fields[i] = blob - lastIndex = i - } - if storedState == 0 && lastIndex == -1 { - if node.db { - node.db = false - ns.deleteNode(id) - } - node.dirty = false - return nil - } - enc.Fields = 
enc.Fields[:lastIndex+1] - data, err := rlp.EncodeToBytes(&enc) - if err != nil { - return err - } - if err := ns.db.Put(append(ns.dbNodeKey, id[:]...), data); err != nil { - return err - } - node.dirty, node.db = false, true - - if ns.saveNodeHook != nil { - ns.saveNodeHook(node) - } - return nil -} - -// deleteNode removes a node info from the database -func (ns *NodeStateMachine) deleteNode(id enode.ID) { - ns.db.Delete(append(ns.dbNodeKey, id[:]...)) -} - -// saveToDb saves the persistent flags and fields of all nodes that have been changed -func (ns *NodeStateMachine) saveToDb() { - for id, node := range ns.nodes { - if node.dirty { - err := ns.saveNode(id, node) - if err != nil { - log.Error("Failed to save node", "id", id, "error", err) - } - } - } -} - -// updateEnode updates the enode entry belonging to the given node if it already exists -func (ns *NodeStateMachine) updateEnode(n *enode.Node) (enode.ID, *nodeInfo) { - id := n.ID() - node := ns.nodes[id] - if node != nil && n.Seq() > node.node.Seq() { - node.node = n - node.dirty = true - } - return id, node -} - -// Persist saves the persistent state and fields of the given node immediately -func (ns *NodeStateMachine) Persist(n *enode.Node) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if id, node := ns.updateEnode(n); node != nil && node.dirty { - err := ns.saveNode(id, node) - if err != nil { - log.Error("Failed to save node", "id", id, "error", err) - } - return err - } - return nil -} - -// SetState updates the given node state flags and blocks until the operation is finished. -// If a flag with a timeout is set again, the operation removes or replaces the existing timeout. 
-func (ns *NodeStateMachine) SetState(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return ErrClosed - } - ns.setState(n, setFlags, resetFlags, timeout) - ns.opFinish() - return nil -} - -// SetStateSub updates the given node state flags without blocking (should be called -// from a subscription/operation callback). -func (ns *NodeStateMachine) SetStateSub(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.opCheck() - ns.setState(n, setFlags, resetFlags, timeout) -} - -func (ns *NodeStateMachine) setState(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) { - ns.checkStarted() - set, reset := ns.stateMask(setFlags), ns.stateMask(resetFlags) - id, node := ns.updateEnode(n) - if node == nil { - if set == 0 { - return - } - node = ns.newNode(n) - ns.nodes[id] = node - } - oldState := node.state - newState := (node.state & (^reset)) | set - changed := oldState ^ newState - node.state = newState - - // Remove the timeout callbacks for all reset and set flags, - // even they are not existent(it's noop). 
- ns.removeTimeouts(node, set|reset) - - // Register the timeout callback if required - if timeout != 0 && set != 0 { - ns.addTimeout(n, set, timeout) - } - if newState == oldState { - return - } - if newState == 0 && node.fieldCount == 0 { - delete(ns.nodes, id) - if node.db { - ns.deleteNode(id) - } - } else { - if changed&ns.saveFlags != 0 { - node.dirty = true - } - } - callback := func() { - for _, sub := range ns.stateSubs { - if changed&sub.mask != 0 { - sub.callback(n, Flags{mask: oldState & sub.mask, setup: ns.setup}, Flags{mask: newState & sub.mask, setup: ns.setup}) - } - } - } - ns.opPending = append(ns.opPending, callback) -} - -// opCheck checks whether an operation is active -func (ns *NodeStateMachine) opCheck() { - if !ns.opFlag { - panic("Operation has not started") - } -} - -// opStart waits until other operations are finished and starts a new one -func (ns *NodeStateMachine) opStart() bool { - for ns.opFlag { - ns.opWait.Wait() - } - if ns.closed { - return false - } - ns.opFlag = true - return true -} - -// opFinish finishes the current operation by running all pending callbacks. -// Callbacks resulting from a state/field change performed in a previous callback are always -// put at the end of the pending list and therefore processed after all callbacks resulting -// from the previous state/field change. -func (ns *NodeStateMachine) opFinish() { - for len(ns.opPending) != 0 { - list := ns.opPending - ns.lock.Unlock() - for _, cb := range list { - cb() - } - ns.lock.Lock() - ns.opPending = ns.opPending[len(list):] - } - ns.opPending = nil - ns.opFlag = false - ns.opWait.Broadcast() -} - -// Operation calls the given function as an operation callback. This allows the caller -// to start an operation with multiple initial changes. The same rules apply as for -// subscription callbacks. 
-func (ns *NodeStateMachine) Operation(fn func()) error { - ns.lock.Lock() - started := ns.opStart() - ns.lock.Unlock() - if !started { - return ErrClosed - } - fn() - ns.lock.Lock() - ns.opFinish() - ns.lock.Unlock() - return nil -} - -// offlineCallbacks calls state update callbacks at startup or shutdown -func (ns *NodeStateMachine) offlineCallbacks(start bool) { - for _, cb := range ns.offlineCallbackList { - cb := cb - callback := func() { - for _, sub := range ns.stateSubs { - offState := offlineState & sub.mask - onState := cb.state & sub.mask - if offState == onState { - continue - } - if start { - sub.callback(cb.node.node, Flags{mask: offState, setup: ns.setup}, Flags{mask: onState, setup: ns.setup}) - } else { - sub.callback(cb.node.node, Flags{mask: onState, setup: ns.setup}, Flags{mask: offState, setup: ns.setup}) - } - } - for i, f := range cb.fields { - if f == nil || ns.fields[i].subs == nil { - continue - } - for _, fsub := range ns.fields[i].subs { - if start { - fsub(cb.node.node, Flags{mask: offlineState, setup: ns.setup}, nil, f) - } else { - fsub(cb.node.node, Flags{mask: offlineState, setup: ns.setup}, f, nil) - } - } - } - } - ns.opPending = append(ns.opPending, callback) - } - ns.offlineCallbackList = nil -} - -// AddTimeout adds a node state timeout associated to the given state flag(s). -// After the specified time interval, the relevant states will be reset. -func (ns *NodeStateMachine) AddTimeout(n *enode.Node, flags Flags, timeout time.Duration) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return ErrClosed - } - ns.addTimeout(n, ns.stateMask(flags), timeout) - return nil -} - -// addTimeout adds a node state timeout associated to the given state flag(s). 
-func (ns *NodeStateMachine) addTimeout(n *enode.Node, mask bitMask, timeout time.Duration) { - _, node := ns.updateEnode(n) - if node == nil { - return - } - mask &= node.state - if mask == 0 { - return - } - ns.removeTimeouts(node, mask) - t := &nodeStateTimeout{mask: mask} - t.timer = ns.clock.AfterFunc(timeout, func() { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return - } - ns.setState(n, Flags{}, Flags{mask: t.mask, setup: ns.setup}, 0) - ns.opFinish() - }) - node.timeouts = append(node.timeouts, t) - if mask&ns.saveFlags != 0 { - node.dirty = true - } -} - -// removeTimeouts removes node state timeouts associated to the given state flag(s). -// If a timeout was associated to multiple flags which are not all included in the -// specified remove mask then only the included flags are de-associated and the timer -// stays active. -func (ns *NodeStateMachine) removeTimeouts(node *nodeInfo, mask bitMask) { - for i := 0; i < len(node.timeouts); i++ { - t := node.timeouts[i] - match := t.mask & mask - if match == 0 { - continue - } - t.mask -= match - if t.mask != 0 { - continue - } - t.timer.Stop() - node.timeouts[i] = node.timeouts[len(node.timeouts)-1] - node.timeouts = node.timeouts[:len(node.timeouts)-1] - i-- - if match&ns.saveFlags != 0 { - node.dirty = true - } - } -} - -// GetField retrieves the given field of the given node. Note that when used in a -// subscription callback the result can be out of sync with the state change represented -// by the callback parameters so extra safety checks might be necessary. -func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return nil - } - if _, node := ns.updateEnode(n); node != nil { - return node.fields[ns.fieldIndex(field)] - } - return nil -} - -// GetState retrieves the current state of the given node. 
Note that when used in a -// subscription callback the result can be out of sync with the state change represented -// by the callback parameters so extra safety checks might be necessary. -func (ns *NodeStateMachine) GetState(n *enode.Node) Flags { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return Flags{} - } - if _, node := ns.updateEnode(n); node != nil { - return Flags{mask: node.state, setup: ns.setup} - } - return Flags{} -} - -// SetField sets the given field of the given node and blocks until the operation is finished -func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return ErrClosed - } - err := ns.setField(n, field, value) - ns.opFinish() - return err -} - -// SetFieldSub sets the given field of the given node without blocking (should be called -// from a subscription/operation callback). -func (ns *NodeStateMachine) SetFieldSub(n *enode.Node, field Field, value interface{}) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.opCheck() - return ns.setField(n, field, value) -} - -func (ns *NodeStateMachine) setField(n *enode.Node, field Field, value interface{}) error { - ns.checkStarted() - id, node := ns.updateEnode(n) - if node == nil { - if value == nil { - return nil - } - node = ns.newNode(n) - ns.nodes[id] = node - } - fieldIndex := ns.fieldIndex(field) - f := ns.fields[fieldIndex] - if value != nil && reflect.TypeOf(value) != f.ftype { - log.Error("Invalid field type", "type", reflect.TypeOf(value), "required", f.ftype) - return ErrInvalidField - } - oldValue := node.fields[fieldIndex] - if value == oldValue { - return nil - } - if oldValue != nil { - node.fieldCount-- - } - if value != nil { - node.fieldCount++ - } - node.fields[fieldIndex] = value - if node.state == 0 && node.fieldCount == 0 { - delete(ns.nodes, id) - if node.db { - ns.deleteNode(id) - } - } else { - if f.encode != nil { - 
node.dirty = true - } - } - state := node.state - callback := func() { - for _, cb := range f.subs { - cb(n, Flags{mask: state, setup: ns.setup}, oldValue, value) - } - } - ns.opPending = append(ns.opPending, callback) - return nil -} - -// ForEach calls the callback for each node having all of the required and none of the -// disabled flags set. -// Note that this callback is not an operation callback but ForEach can be called from an -// Operation callback or Operation can also be called from a ForEach callback if necessary. -func (ns *NodeStateMachine) ForEach(requireFlags, disableFlags Flags, cb func(n *enode.Node, state Flags)) { - ns.lock.Lock() - ns.checkStarted() - type callback struct { - node *enode.Node - state bitMask - } - require, disable := ns.stateMask(requireFlags), ns.stateMask(disableFlags) - var callbacks []callback - for _, node := range ns.nodes { - if node.state&require == require && node.state&disable == 0 { - callbacks = append(callbacks, callback{node.node, node.state & (require | disable)}) - } - } - ns.lock.Unlock() - for _, c := range callbacks { - cb(c.node, Flags{mask: c.state, setup: ns.setup}) - } -} - -// GetNode returns the enode currently associated with the given ID -func (ns *NodeStateMachine) GetNode(id enode.ID) *enode.Node { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if node := ns.nodes[id]; node != nil { - return node.node - } - return nil -} - -// AddLogMetrics adds logging and/or metrics for nodes entering, exiting and currently -// being in a given set specified by required and disabled state flags -func (ns *NodeStateMachine) AddLogMetrics(requireFlags, disableFlags Flags, name string, inMeter, outMeter metrics.Meter, gauge metrics.Gauge) { - var count int64 - ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState Flags) { - oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags) - newMatch := newState.HasAll(requireFlags) && 
newState.HasNone(disableFlags) - if newMatch == oldMatch { - return - } - - if newMatch { - count++ - if name != "" { - log.Debug("Node entered", "set", name, "id", n.ID(), "count", count) - } - if inMeter != nil { - inMeter.Mark(1) - } - } else { - count-- - if name != "" { - log.Debug("Node left", "set", name, "id", n.ID(), "count", count) - } - if outMeter != nil { - outMeter.Mark(1) - } - } - if gauge != nil { - gauge.Update(count) - } - }) -} diff --git a/p2p/nodestate/nodestate_test.go b/p2p/nodestate/nodestate_test.go deleted file mode 100644 index d06ad755e22e..000000000000 --- a/p2p/nodestate/nodestate_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package nodestate - -import ( - "errors" - "fmt" - "reflect" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/rlp" -) - -func testSetup(flagPersist []bool, fieldType []reflect.Type) (*Setup, []Flags, []Field) { - setup := &Setup{} - flags := make([]Flags, len(flagPersist)) - for i, persist := range flagPersist { - if persist { - flags[i] = setup.NewPersistentFlag(fmt.Sprintf("flag-%d", i)) - } else { - flags[i] = setup.NewFlag(fmt.Sprintf("flag-%d", i)) - } - } - fields := make([]Field, len(fieldType)) - for i, ftype := range fieldType { - switch ftype { - case reflect.TypeOf(uint64(0)): - fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, uint64FieldEnc, uint64FieldDec) - case reflect.TypeOf(""): - fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, stringFieldEnc, stringFieldDec) - default: - fields[i] = setup.NewField(fmt.Sprintf("field-%d", i), ftype) - } - } - return setup, flags, fields -} - -func testNode(b byte) *enode.Node { - r := &enr.Record{} - r.SetSig(dummyIdentity{b}, []byte{42}) - n, _ := enode.New(dummyIdentity{b}, r) - return n -} - -func TestCallback(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - set0 := make(chan struct{}, 1) - set1 := make(chan struct{}, 1) - set2 := make(chan struct{}, 1) - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { set0 <- struct{}{} }) - ns.SubscribeState(flags[1], func(n *enode.Node, oldState, newState Flags) { set1 <- struct{}{} }) - ns.SubscribeState(flags[2], func(n *enode.Node, oldState, newState Flags) { set2 <- struct{}{} }) - - ns.Start() - - ns.SetState(testNode(1), flags[0], Flags{}, 0) - 
ns.SetState(testNode(1), flags[1], Flags{}, time.Second) - ns.SetState(testNode(1), flags[2], Flags{}, 2*time.Second) - - for i := 0; i < 3; i++ { - select { - case <-set0: - case <-set1: - case <-set2: - case <-time.After(time.Second): - t.Fatalf("failed to invoke callback") - } - } -} - -func TestPersistentFlags(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{true, true, true, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - saveNode := make(chan *nodeInfo, 5) - ns.saveNodeHook = func(node *nodeInfo) { - saveNode <- node - } - - ns.Start() - - ns.SetState(testNode(1), flags[0], Flags{}, time.Second) // state with timeout should not be saved - ns.SetState(testNode(2), flags[1], Flags{}, 0) - ns.SetState(testNode(3), flags[2], Flags{}, 0) - ns.SetState(testNode(4), flags[3], Flags{}, 0) - ns.SetState(testNode(5), flags[0], Flags{}, 0) - ns.Persist(testNode(5)) - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } - ns.Stop() - - for i := 0; i < 2; i++ { - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } - } - select { - case <-saveNode: - t.Fatalf("Unexpected saveNode") - case <-time.After(time.Millisecond * 100): - } -} - -func TestSetField(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf("")}) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - saveNode := make(chan *nodeInfo, 1) - ns.saveNodeHook = func(node *nodeInfo) { - saveNode <- node - } - - ns.Start() - - // Set field before setting state - ns.SetField(testNode(1), fields[0], "hello world") - field := ns.GetField(testNode(1), fields[0]) - if field == nil { - t.Fatalf("Field should be set before setting states") - } - ns.SetField(testNode(1), fields[0], nil) - field = ns.GetField(testNode(1), fields[0]) - if field != nil { - 
t.Fatalf("Field should be unset") - } - // Set field after setting state - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetField(testNode(1), fields[0], "hello world") - field = ns.GetField(testNode(1), fields[0]) - if field == nil { - t.Fatalf("Field should be set after setting states") - } - if err := ns.SetField(testNode(1), fields[0], 123); err == nil { - t.Fatalf("Invalid field should be rejected") - } - // Dirty node should be written back - ns.Stop() - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } -} - -func TestSetState(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - type change struct{ old, new Flags } - set := make(chan change, 1) - ns.SubscribeState(flags[0].Or(flags[1]), func(n *enode.Node, oldState, newState Flags) { - set <- change{ - old: oldState, - new: newState, - } - }) - - ns.Start() - - check := func(expectOld, expectNew Flags, expectChange bool) { - if expectChange { - select { - case c := <-set: - if !c.old.Equals(expectOld) { - t.Fatalf("Old state mismatch") - } - if !c.new.Equals(expectNew) { - t.Fatalf("New state mismatch") - } - case <-time.After(time.Second): - } - return - } - select { - case <-set: - t.Fatalf("Unexpected change") - case <-time.After(time.Millisecond * 100): - return - } - } - ns.SetState(testNode(1), flags[0], Flags{}, 0) - check(Flags{}, flags[0], true) - - ns.SetState(testNode(1), flags[1], Flags{}, 0) - check(flags[0], flags[0].Or(flags[1]), true) - - ns.SetState(testNode(1), flags[2], Flags{}, 0) - check(Flags{}, Flags{}, false) - - ns.SetState(testNode(1), Flags{}, flags[0], 0) - check(flags[0].Or(flags[1]), flags[1], true) - - ns.SetState(testNode(1), Flags{}, flags[1], 0) - check(flags[1], Flags{}, true) - - ns.SetState(testNode(1), Flags{}, flags[2], 0) - check(Flags{}, Flags{}, false) - - 
ns.SetState(testNode(1), flags[0].Or(flags[1]), Flags{}, time.Second) - check(Flags{}, flags[0].Or(flags[1]), true) - clock.Run(time.Second) - check(flags[0].Or(flags[1]), Flags{}, true) -} - -func uint64FieldEnc(field interface{}) ([]byte, error) { - if u, ok := field.(uint64); ok { - enc, err := rlp.EncodeToBytes(&u) - return enc, err - } - return nil, errors.New("invalid field type") -} - -func uint64FieldDec(enc []byte) (interface{}, error) { - var u uint64 - err := rlp.DecodeBytes(enc, &u) - return u, err -} - -func stringFieldEnc(field interface{}) ([]byte, error) { - if s, ok := field.(string); ok { - return []byte(s), nil - } - return nil, errors.New("invalid field type") -} - -func stringFieldDec(enc []byte) (interface{}, error) { - return string(enc), nil -} - -func TestPersistentFields(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0)), reflect.TypeOf("")}) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns.Start() - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetField(testNode(1), fields[0], uint64(100)) - ns.SetField(testNode(1), fields[1], "hello world") - ns.Stop() - - ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns2.Start() - field0 := ns2.GetField(testNode(1), fields[0]) - if !reflect.DeepEqual(field0, uint64(100)) { - t.Fatalf("Field changed") - } - field1 := ns2.GetField(testNode(1), fields[1]) - if !reflect.DeepEqual(field1, "hello world") { - t.Fatalf("Field changed") - } - - s.Version++ - ns3 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - ns3.Start() - if ns3.GetField(testNode(1), fields[0]) != nil { - t.Fatalf("Old field version should have been discarded") - } -} - -func TestFieldSub(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0))}) - ns := 
NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - var ( - lastState Flags - lastOldValue, lastNewValue interface{} - ) - ns.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) { - lastState, lastOldValue, lastNewValue = state, oldValue, newValue - }) - check := func(state Flags, oldValue, newValue interface{}) { - if !lastState.Equals(state) || lastOldValue != oldValue || lastNewValue != newValue { - t.Fatalf("Incorrect field sub callback (expected [%v %v %v], got [%v %v %v])", state, oldValue, newValue, lastState, lastOldValue, lastNewValue) - } - } - ns.Start() - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetField(testNode(1), fields[0], uint64(100)) - check(flags[0], nil, uint64(100)) - ns.Stop() - check(s.OfflineFlag(), uint64(100), nil) - - ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - ns2.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) { - lastState, lastOldValue, lastNewValue = state, oldValue, newValue - }) - ns2.Start() - check(s.OfflineFlag(), nil, uint64(100)) - ns2.SetState(testNode(1), Flags{}, flags[0], 0) - ns2.SetField(testNode(1), fields[0], nil) - check(Flags{}, uint64(100), nil) - ns2.Stop() -} - -func TestDuplicatedFlags(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{true}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - type change struct{ old, new Flags } - set := make(chan change, 1) - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { - set <- change{oldState, newState} - }) - - ns.Start() - defer ns.Stop() - - check := func(expectOld, expectNew Flags, expectChange bool) { - if expectChange { - select { - case c := <-set: - if !c.old.Equals(expectOld) { - t.Fatalf("Old state mismatch") - } - if !c.new.Equals(expectNew) { - t.Fatalf("New state mismatch") - } - case <-time.After(time.Second): - } - return - } - select { - case 
<-set: - t.Fatalf("Unexpected change") - case <-time.After(time.Millisecond * 100): - return - } - } - ns.SetState(testNode(1), flags[0], Flags{}, time.Second) - check(Flags{}, flags[0], true) - ns.SetState(testNode(1), flags[0], Flags{}, 2*time.Second) // extend the timeout to 2s - check(Flags{}, flags[0], false) - - clock.Run(2 * time.Second) - check(flags[0], Flags{}, true) -} - -func TestCallbackOrder(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { - if newState.Equals(flags[0]) { - ns.SetStateSub(n, flags[1], Flags{}, 0) - ns.SetStateSub(n, flags[2], Flags{}, 0) - } - }) - ns.SubscribeState(flags[1], func(n *enode.Node, oldState, newState Flags) { - if newState.Equals(flags[1]) { - ns.SetStateSub(n, flags[3], Flags{}, 0) - } - }) - lastState := Flags{} - ns.SubscribeState(MergeFlags(flags[1], flags[2], flags[3]), func(n *enode.Node, oldState, newState Flags) { - if !oldState.Equals(lastState) { - t.Fatalf("Wrong callback order") - } - lastState = newState - }) - - ns.Start() - defer ns.Stop() - - ns.SetState(testNode(1), flags[0], Flags{}, 0) -} diff --git a/p2p/server.go b/p2p/server.go index 5b9a4aa71fdc..13eebed3f4f1 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "net" + "net/netip" "slices" "sync" "sync/atomic" @@ -190,8 +191,8 @@ type Server struct { nodedb *enode.DB localnode *enode.LocalNode - ntab *discover.UDPv4 - DiscV5 *discover.UDPv5 + discv4 *discover.UDPv4 + discv5 *discover.UDPv5 discmix *enode.FairMix dialsched *dialScheduler @@ -400,6 +401,16 @@ func (srv *Server) Self() *enode.Node { return ln.Node() } +// DiscoveryV4 returns the discovery v4 instance, if configured. 
+func (srv *Server) DiscoveryV4() *discover.UDPv4 { + return srv.discv4 +} + +// DiscoveryV5 returns the discovery v5 instance, if configured. +func (srv *Server) DiscoveryV5() *discover.UDPv5 { + return srv.discv5 +} + // Stop terminates the server and all active peer connections. // It blocks until all active connections have been closed. func (srv *Server) Stop() { @@ -425,11 +436,11 @@ type sharedUDPConn struct { unhandled chan discover.ReadPacket } -// ReadFromUDP implements discover.UDPConn -func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { +// ReadFromUDPAddrPort implements discover.UDPConn +func (s *sharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { packet, ok := <-s.unhandled if !ok { - return 0, nil, errors.New("connection was closed") + return 0, netip.AddrPort{}, errors.New("connection was closed") } l := len(packet.Data) if l > len(b) { @@ -547,13 +558,13 @@ func (srv *Server) setupDiscovery() error { ) // If both versions of discovery are running, setup a shared // connection, so v5 can read unhandled messages from v4. - if srv.DiscoveryV4 && srv.DiscoveryV5 { + if srv.Config.DiscoveryV4 && srv.Config.DiscoveryV5 { unhandled = make(chan discover.ReadPacket, 100) sconn = &sharedUDPConn{conn, unhandled} } // Start discovery services. 
- if srv.DiscoveryV4 { + if srv.Config.DiscoveryV4 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, NetRestrict: srv.NetRestrict, @@ -565,17 +576,17 @@ func (srv *Server) setupDiscovery() error { if err != nil { return err } - srv.ntab = ntab + srv.discv4 = ntab srv.discmix.AddSource(ntab.RandomNodes()) } - if srv.DiscoveryV5 { + if srv.Config.DiscoveryV5 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, NetRestrict: srv.NetRestrict, Bootnodes: srv.BootstrapNodesV5, Log: srv.log, } - srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) + srv.discv5, err = discover.ListenV5(sconn, srv.localnode, cfg) if err != nil { return err } @@ -602,8 +613,8 @@ func (srv *Server) setupDialScheduler() { dialer: srv.Dialer, clock: srv.clock, } - if srv.ntab != nil { - config.resolver = srv.ntab + if srv.discv4 != nil { + config.resolver = srv.discv4 } if config.dialer == nil { config.dialer = tcpDialer{&net.Dialer{Timeout: defaultDialTimeout}} @@ -799,11 +810,11 @@ running: srv.log.Trace("P2P networking is spinning down") // Terminate discovery. If there is a running lookup it will terminate soon. - if srv.ntab != nil { - srv.ntab.Close() + if srv.discv4 != nil { + srv.discv4.Close() } - if srv.DiscV5 != nil { - srv.DiscV5.Close() + if srv.discv5 != nil { + srv.discv5.Close() } // Disconnect all peers. 
for _, p := range peers { diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 349e496b2f68..0efe9744a5c1 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "maps" "math" "net" "sync" @@ -215,10 +216,7 @@ func (sn *SimNode) ServeRPC(conn *websocket.Conn) error { // simulation_snapshot RPC method func (sn *SimNode) Snapshots() (map[string][]byte, error) { sn.lock.RLock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } + services := maps.Clone(sn.running) sn.lock.RUnlock() if len(services) == 0 { return nil, errors.New("no running services") @@ -315,11 +313,7 @@ func (sn *SimNode) Services() []node.Lifecycle { func (sn *SimNode) ServiceMap() map[string]node.Lifecycle { sn.lock.RLock() defer sn.lock.RUnlock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } - return services + return maps.Clone(sn.running) } // Server returns the underlying p2p.Server diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index f34315f17097..e18aaacc334a 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -42,7 +42,6 @@ import ( // // - SimNode, an in-memory node in the same process // - ExecNode, a child process node -// - DockerNode, a node running in a Docker container type Node interface { // Addr returns the node's address (e.g. 
an Enode URL) Addr() []byte diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go index 70b35ad77742..b0b8f22fdb72 100644 --- a/p2p/simulations/examples/ping-pong.go +++ b/p2p/simulations/examples/ping-pong.go @@ -33,7 +33,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/simulations/adapters" ) -var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim", "exec" or "docker")`) +var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim" or "exec")`) // main() starts a simulation network which contains nodes running a simple // ping-pong protocol diff --git a/params/config.go b/params/config.go index d754edcd6f8f..130a065e3791 100644 --- a/params/config.go +++ b/params/config.go @@ -573,21 +573,26 @@ func (c *ChainConfig) IsShanghai(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.ShanghaiTime, time) } -// IsCancun returns whether num is either equal to the Cancun fork time or greater. +// IsCancun returns whether time is either equal to the Cancun fork time or greater. func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.CancunTime, time) } -// IsPrague returns whether num is either equal to the Prague fork time or greater. +// IsPrague returns whether time is either equal to the Prague fork time or greater. func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } -// IsVerkle returns whether num is either equal to the Verkle fork time or greater. +// IsVerkle returns whether time is either equal to the Verkle fork time or greater. func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) } +// IsEIP4762 returns whether eip 4762 has been activated at given block. 
+func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool { + return c.IsVerkle(num, time) +} + // IsRIP7560 returns whether num is either equal to the RIP7560 fork block or greater. func (c *ChainConfig) IsRIP7560(num *big.Int) bool { return isBlockForked(c.RIP7560Block, num) @@ -892,7 +897,7 @@ func newTimestampCompatError(what string, storedtime, newtime *uint64) *ConfigCo NewTime: newtime, RewindToTime: 0, } - if rew != nil { + if rew != nil && *rew != 0 { err.RewindToTime = *rew - 1 } return err @@ -902,7 +907,15 @@ func (err *ConfigCompatError) Error() string { if err.StoredBlock != nil { return fmt.Sprintf("mismatching %s in database (have block %d, want block %d, rewindto block %d)", err.What, err.StoredBlock, err.NewBlock, err.RewindToBlock) } - return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, err.StoredTime, err.NewTime, err.RewindToTime) + + if err.StoredTime == nil && err.NewTime == nil { + return "" + } else if err.StoredTime == nil && err.NewTime != nil { + return fmt.Sprintf("mismatching %s in database (have timestamp nil, want timestamp %d, rewindto timestamp %d)", err.What, *err.NewTime, err.RewindToTime) + } else if err.StoredTime != nil && err.NewTime == nil { + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp nil, rewindto timestamp %d)", err.What, *err.StoredTime, err.RewindToTime) + } + return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, *err.StoredTime, *err.NewTime, err.RewindToTime) } // Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions @@ -913,6 +926,7 @@ func (err *ConfigCompatError) Error() string { type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool + IsEIP2929, IsEIP4762 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, 
IsPrague bool @@ -927,6 +941,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules } // disallow setting Merge out of order isMerge = isMerge && c.IsLondon(num) + isVerkle := isMerge && c.IsVerkle(num, timestamp) return Rules{ ChainID: new(big.Int).Set(chainID), IsHomestead: c.IsHomestead(num), @@ -938,11 +953,13 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), IsBerlin: c.IsBerlin(num), + IsEIP2929: c.IsBerlin(num) && !isVerkle, IsLondon: c.IsLondon(num), IsMerge: isMerge, IsShanghai: isMerge && c.IsShanghai(num, timestamp), IsCancun: isMerge && c.IsCancun(num, timestamp), IsPrague: isMerge && c.IsPrague(num, timestamp), - IsVerkle: isMerge && c.IsVerkle(num, timestamp), + IsVerkle: isVerkle, + IsEIP4762: isVerkle, } } diff --git a/params/config_test.go b/params/config_test.go index bf8ce2fc5e24..fa444a1d0b76 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/math" + "github.com/stretchr/testify/require" ) func TestCheckCompatible(t *testing.T) { @@ -137,3 +138,20 @@ func TestConfigRules(t *testing.T) { t.Errorf("expected %v to be shanghai", stamp) } } + +func TestTimestampCompatError(t *testing.T) { + require.Equal(t, new(ConfigCompatError).Error(), "") + + errWhat := "Shanghai fork timestamp" + require.Equal(t, newTimestampCompatError(errWhat, nil, newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp nil, want timestamp 1681338455, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), nil).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp nil, rewindto timestamp 1681338454)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(1681338455), newUint64(600624000)).Error(), + "mismatching 
Shanghai fork timestamp in database (have timestamp 1681338455, want timestamp 600624000, rewindto timestamp 600623999)") + + require.Equal(t, newTimestampCompatError(errWhat, newUint64(0), newUint64(1681338455)).Error(), + "mismatching Shanghai fork timestamp in database (have timestamp 0, want timestamp 1681338455, rewindto timestamp 0)") +} diff --git a/params/protocol_params.go b/params/protocol_params.go index 863cf58ece46..8ffe8ee75db1 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -86,6 +86,7 @@ const ( LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. Create2Gas uint64 = 32000 // Once per CREATE2 operation + CreateNGasEip4762 uint64 = 1000 // Once per CREATEn operations post-verkle SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. 
@@ -186,6 +187,10 @@ var ( // BeaconRootsAddress is the address where historical beacon roots are stored as per EIP-4788 BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02") + + // BeaconRootsCode is the code where historical beacon roots are stored as per EIP-4788 + BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500") + // SystemAddress is where the system-transaction is sent from as per EIP-4788 SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe") ) diff --git a/params/verkle_params.go b/params/verkle_params.go new file mode 100644 index 000000000000..93d4f7cd6476 --- /dev/null +++ b/params/verkle_params.go @@ -0,0 +1,36 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package params + +// Verkle tree EIP: costs associated to witness accesses +var ( + WitnessBranchReadCost uint64 = 1900 + WitnessChunkReadCost uint64 = 200 + WitnessBranchWriteCost uint64 = 3000 + WitnessChunkWriteCost uint64 = 500 + WitnessChunkFillCost uint64 = 6200 +) + +// ClearVerkleWitnessCosts sets all witness costs to 0, which is necessary +// for historical block replay simulations. +func ClearVerkleWitnessCosts() { + WitnessBranchReadCost = 0 + WitnessChunkReadCost = 0 + WitnessBranchWriteCost = 0 + WitnessChunkWriteCost = 0 + WitnessChunkFillCost = 0 +} diff --git a/params/version.go b/params/version.go index 319f21b2a8b4..a0e2de5a4941 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 14 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 4 // Patch version component of the current release VersionMeta = "unstable" // Version metadata to append to the version string ) diff --git a/tests/init.go b/tests/init.go index e333587a07a7..c85e714c0023 100644 --- a/tests/init.go +++ b/tests/init.go @@ -212,7 +212,7 @@ var Forks = map[string]*params.ChainConfig{ LondonBlock: big.NewInt(0), ArrowGlacierBlock: big.NewInt(0), }, - "ArrowGlacierToMergeAtDiffC0000": { + "ArrowGlacierToParisAtDiffC0000": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -246,6 +246,23 @@ var Forks = map[string]*params.ChainConfig{ ArrowGlacierBlock: big.NewInt(0), GrayGlacierBlock: big.NewInt(0), }, + "Paris": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: 
big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + }, "Merge": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), @@ -281,7 +298,7 @@ var Forks = map[string]*params.ChainConfig{ TerminalTotalDifficulty: big.NewInt(0), ShanghaiTime: u64(0), }, - "MergeToShanghaiAtTime15k": { + "ParisToShanghaiAtTime15k": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), diff --git a/tests/state_test.go b/tests/state_test.go index 6f53b88722d6..76fec97de0ee 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -54,14 +54,6 @@ func initMatcher(st *testMatcher) { // Uses 1GB RAM per tested fork st.skipLoad(`^stStaticCall/static_Call1MB`) - // These tests fail as of https://github.com/ethereum/go-ethereum/pull/28666, since we - // no longer delete "leftover storage" when deploying a contract. - st.skipLoad(`^stSStoreTest/InitCollision\.json`) - st.skipLoad(`^stRevertTest/RevertInCreateInInit\.json`) - st.skipLoad(`^stExtCodeHash/dynamicAccountOverwriteEmpty\.json`) - st.skipLoad(`^stCreate2/create2collisionStorage\.json`) - st.skipLoad(`^stCreate2/RevertInCreateInInitCreate2\.json`) - // Broken tests: // EOF is not part of cancun st.skipLoad(`^stEOF/`) diff --git a/tests/testdata b/tests/testdata index fa51c5c164f7..faf33b471465 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit fa51c5c164f79140730ccb8fe26a46c3d3994338 +Subproject commit faf33b471465d3c6cdc3d04fbd690895f78d33f2 diff --git a/trie/hasher.go b/trie/hasher.go index 1e063d8020b9..abf654c709cf 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // hasher is a type used for the trie Hash operation. 
A hasher has some @@ -38,7 +37,7 @@ var hasherPool = sync.Pool{ New: func() interface{} { return &hasher{ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode. - sha: sha3.NewLegacyKeccak256().(crypto.KeccakState), + sha: crypto.NewKeccakState(), encbuf: rlp.NewEncoderBuffer(nil), } }, diff --git a/trie/secure_trie.go b/trie/secure_trie.go index efd4dfb5d33f..e38d5ac4dc36 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -284,3 +284,7 @@ func (t *StateTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +func (t *StateTrie) IsVerkle() bool { + return false +} diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go index 5126e0bd07ce..418b941d94e1 100644 --- a/trie/stacktrie_fuzzer_test.go +++ b/trie/stacktrie_fuzzer_test.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) func FuzzStackTrie(f *testing.F) { @@ -41,10 +40,10 @@ func fuzz(data []byte, debugging bool) { // This spongeDb is used to check the sequence of disk-db-writes var ( input = bytes.NewReader(data) - spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeA = &spongeDb{sponge: crypto.NewKeccakState()} dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme) trieA = NewEmpty(dbA) - spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} + spongeB = &spongeDb{sponge: crypto.NewKeccakState()} dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme) trieB = NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme()) diff --git a/trie/sync.go b/trie/sync.go index 589d28364b87..3b7caae5b103 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -22,9 +22,11 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" 
"github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -148,15 +150,42 @@ type CodeSyncResult struct { // nodeOp represents an operation upon the trie node. It can either represent a // deletion to the specific node or a node write for persisting retrieved node. type nodeOp struct { + del bool // flag if op stands for a delete operation owner common.Hash // identifier of the trie (empty for account trie) path []byte // path from the root to the specified node. blob []byte // the content of the node (nil for deletion) hash common.Hash // hash of the node content (empty for node deletion) } -// isDelete indicates if the operation is a database deletion. -func (op *nodeOp) isDelete() bool { - return len(op.blob) == 0 +// valid checks whether the node operation is valid. +func (op *nodeOp) valid() bool { + if op.del && len(op.blob) != 0 { + return false + } + if !op.del && len(op.blob) == 0 { + return false + } + return true +} + +// string returns the node operation in string representation. 
+func (op *nodeOp) string() string { + var node string + if op.owner == (common.Hash{}) { + node = fmt.Sprintf("node: (%v)", op.path) + } else { + node = fmt.Sprintf("node: (%x-%v)", op.owner, op.path) + } + var blobHex string + if len(op.blob) == 0 { + blobHex = "nil" + } else { + blobHex = hexutil.Encode(op.blob) + } + if op.del { + return fmt.Sprintf("del %s %s %s", node, blobHex, op.hash.Hex()) + } + return fmt.Sprintf("write %s %s %s", node, blobHex, op.hash.Hex()) } // syncMemBatch is an in-memory buffer of successfully downloaded but not yet @@ -219,6 +248,7 @@ func (batch *syncMemBatch) delNode(owner common.Hash, path []byte) { batch.size += common.HashLength + uint64(len(path)) } batch.nodes = append(batch.nodes, nodeOp{ + del: true, owner: owner, path: path, }) @@ -427,7 +457,10 @@ func (s *Sync) Commit(dbw ethdb.Batch) error { storage int ) for _, op := range s.membatch.nodes { - if op.isDelete() { + if !op.valid() { + return fmt.Errorf("invalid op, %s", op.string()) + } + if op.del { // node deletion is only supported in path mode. if op.owner == (common.Hash{}) { rawdb.DeleteAccountTrieNode(dbw, op.path) @@ -546,9 +579,9 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { // the performance impact negligible. var exists bool if owner == (common.Hash{}) { - exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...)) + exists = rawdb.HasAccountTrieNode(s.database, append(inner, key[:i]...)) } else { - exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...)) + exists = rawdb.HasStorageTrieNode(s.database, owner, append(inner, key[:i]...)) } if exists { s.membatch.delNode(owner, append(inner, key[:i]...)) @@ -691,13 +724,14 @@ func (s *Sync) hasNode(owner common.Hash, path []byte, hash common.Hash) (exists } // If node is running with path scheme, check the presence with node path. 
var blob []byte - var dbHash common.Hash if owner == (common.Hash{}) { - blob, dbHash = rawdb.ReadAccountTrieNode(s.database, path) + blob = rawdb.ReadAccountTrieNode(s.database, path) } else { - blob, dbHash = rawdb.ReadStorageTrieNode(s.database, owner, path) + blob = rawdb.ReadStorageTrieNode(s.database, owner, path) } - exists = hash == dbHash + h := newBlobHasher() + defer h.release() + exists = hash == h.hash(blob) inconsistent = !exists && len(blob) != 0 return exists, inconsistent } @@ -712,3 +746,23 @@ func ResolvePath(path []byte) (common.Hash, []byte) { } return owner, path } + +// blobHasher is used to compute the sha256 hash of the provided data. +type blobHasher struct{ state crypto.KeccakState } + +// blobHasherPool is the pool for reusing pre-allocated hash state. +var blobHasherPool = sync.Pool{ + New: func() interface{} { return &blobHasher{state: crypto.NewKeccakState()} }, +} + +func newBlobHasher() *blobHasher { + return blobHasherPool.Get().(*blobHasher) +} + +func (h *blobHasher) hash(data []byte) common.Hash { + return crypto.HashData(h.state, data) +} + +func (h *blobHasher) release() { + blobHasherPool.Put(h) +} diff --git a/trie/trie_test.go b/trie/trie_test.go index 6ecd20c21894..da60a7423dff 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -886,7 +886,7 @@ func TestCommitSequence(t *testing.T) { } { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements @@ -917,7 +917,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { } { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes - s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} + s := &spongeDb{sponge: crypto.NewKeccakState()} db := 
newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme) trie := NewEmpty(db) // Fill the trie with elements diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 055db8822e72..aa8a0f6d99b6 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -114,7 +114,12 @@ func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error { set.updates -= 1 } } - set.AddNode([]byte(path), node) + if node.IsDeleted() { + set.deletes += 1 + } else { + set.updates += 1 + } + set.Nodes[path] = node } return nil } @@ -130,16 +135,6 @@ func (set *NodeSet) Size() (int, int) { return set.updates, set.deletes } -// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can -// we get rid of it? -func (set *NodeSet) Hashes() []common.Hash { - ret := make([]common.Hash, 0, len(set.Nodes)) - for _, node := range set.Nodes { - ret = append(ret, node.Hash) - } - return ret -} - // Summary returns a string-representation of the NodeSet. func (set *NodeSet) Summary() string { var out = new(strings.Builder) diff --git a/trie/trienode/node_test.go b/trie/trienode/node_test.go new file mode 100644 index 000000000000..bcb3a2202b53 --- /dev/null +++ b/trie/trienode/node_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see + +package trienode + +import ( + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func BenchmarkMerge(b *testing.B) { + b.Run("1K", func(b *testing.B) { + benchmarkMerge(b, 1000) + }) + b.Run("10K", func(b *testing.B) { + benchmarkMerge(b, 10_000) + }) +} + +func benchmarkMerge(b *testing.B, count int) { + x := NewNodeSet(common.Hash{}) + y := NewNodeSet(common.Hash{}) + addNode := func(s *NodeSet) { + path := make([]byte, 4) + rand.Read(path) + blob := make([]byte, 32) + rand.Read(blob) + hash := crypto.Keccak256Hash(blob) + s.AddNode(path, New(hash, blob)) + } + for i := 0; i < count; i++ { + // Random path of 4 nibbles + addNode(x) + addNode(y) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Store set x into a backup + z := NewNodeSet(common.Hash{}) + z.Merge(common.Hash{}, x.Nodes) + // Merge y into x + x.Merge(common.Hash{}, y.Nodes) + x = z + } +} diff --git a/trie/triestate/state.go b/trie/triestate/state.go index aa4d32f852f9..9db9211e8c87 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" - "golang.org/x/crypto/sha3" ) // Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia @@ -257,7 +256,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 328b2d252761..2a4a632d4926 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -23,7 +23,7 @@ import ( "github.com/crate-crypto/go-ipa/bandersnatch/fr" 
"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/metrics" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) @@ -219,7 +219,7 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte { return GetTreeKey(address, treeIndex, subIndex) } -func storageIndex(bytes []byte) (*uint256.Int, byte) { +func StorageIndex(bytes []byte) (*uint256.Int, byte) { // If the storage slot is in the header, we need to add the header offset. var key uint256.Int key.SetBytes(bytes) @@ -245,7 +245,7 @@ func storageIndex(bytes []byte) (*uint256.Int, byte) { // StorageSlotKey returns the verkle tree key of the storage slot for the // specified account. func StorageSlotKey(address []byte, storageKey []byte) []byte { - treeIndex, subIndex := storageIndex(storageKey) + treeIndex, subIndex := StorageIndex(storageKey) return GetTreeKey(address, treeIndex, subIndex) } @@ -296,7 +296,7 @@ func CodeChunkKeyWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256 // slot for the specified account. The difference between StorageSlotKey is the // address evaluation is already computed to minimize the computational overhead. 
func StorageSlotKeyWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { - treeIndex, subIndex := storageIndex(storageKey) + treeIndex, subIndex := StorageIndex(storageKey) return GetTreeKeyWithEvaluatedAddress(evaluated, treeIndex, subIndex) } diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index 28b059c3794e..c29504a6d0cb 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -20,7 +20,7 @@ import ( "bytes" "testing" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/trie/verkle.go b/trie/verkle.go index 01d813d9ec9b..bb0c54857f6d 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb/database" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/triedb/database.go b/triedb/database.go index 261a47dcc2c7..10f77982f336 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -20,6 +20,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" @@ -48,9 +49,6 @@ var HashDefaults = &Config{ // backend defines the methods needed to access/update trie nodes in different // state scheme. type backend interface { - // Scheme returns the identifier of used storage scheme. - Scheme() string - // Initialized returns an indicator if the state data is already initialized // according to the state scheme. Initialized(genesisRoot common.Hash) bool @@ -181,7 +179,10 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Scheme returns the node scheme used in the database. 
func (db *Database) Scheme() string { - return db.backend.Scheme() + if db.config.PathDB != nil { + return rawdb.PathScheme + } + return rawdb.HashScheme } // Close flushes the dangling preimages to disk and closes the trie database. diff --git a/triedb/database/database.go b/triedb/database/database.go index 18a8f454e2f4..f11c7e9bbd3e 100644 --- a/triedb/database/database.go +++ b/triedb/database/database.go @@ -25,6 +25,9 @@ type Reader interface { // Node retrieves the trie node blob with the provided trie identifier, // node path and the corresponding node hash. No error will be returned // if the node is not found. + // + // Don't modify the returned byte slice since it's not deep-copied and + // still be referenced by database. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 7d5499eb693a..ebb5d7205712 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -623,11 +623,6 @@ func (db *Database) Close() error { return nil } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.HashScheme -} - // Reader retrieves a node reader belonging to the given state root. // An error will be returned if the requested state is not available. func (db *Database) Reader(root common.Hash) (*reader, error) { diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 18f2eeef00ce..bd6aeaa6abe4 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -131,15 +132,15 @@ type Database struct { // readOnly is the flag whether the mutation is allowed to be applied. 
// It will be set automatically when the database is journaled during // the shutdown to reject all following unexpected mutations. - readOnly bool // Flag if database is opened in read only mode - waitSync bool // Flag if database is deactivated due to initial state sync - isVerkle bool // Flag if database is used for verkle tree - bufferSize int // Memory allowance (in bytes) for caching dirty nodes - config *Config // Configuration for database - diskdb ethdb.Database // Persistent storage for matured trie nodes - tree *layerTree // The group for all known layers - freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests - lock sync.RWMutex // Lock to prevent mutations from happening at the same time + readOnly bool // Flag if database is opened in read only mode + waitSync bool // Flag if database is deactivated due to initial state sync + isVerkle bool // Flag if database is used for verkle tree + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time } // New attempts to load an already existing layer from a persistent key-value @@ -162,45 +163,10 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { // and in-memory layer journal. db.tree = newLayerTree(db.loadLayers()) - // Open the freezer for state history if the passed database contains an - // ancient store. Otherwise, all the relevant functionalities are disabled. - // - // Because the freezer can only be opened once at the same time, this - // mechanism also ensures that at most one **non-readOnly** database - // is opened at the same time to prevent accidental mutation. 
- if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { - freezer, err := rawdb.NewStateFreezer(ancient, false) - if err != nil { - log.Crit("Failed to open state history freezer", "err", err) - } - db.freezer = freezer - - diskLayerID := db.tree.bottom().stateID() - if diskLayerID == 0 { - // Reset the entire state histories in case the trie database is - // not initialized yet, as these state histories are not expected. - frozen, err := db.freezer.Ancients() - if err != nil { - log.Crit("Failed to retrieve head of state history", "err", err) - } - if frozen != 0 { - err := db.freezer.Reset() - if err != nil { - log.Crit("Failed to reset state histories", "err", err) - } - log.Info("Truncated extraneous state history") - } - } else { - // Truncate the extra state histories above in freezer in case - // it's not aligned with the disk layer. - pruned, err := truncateFromHead(db.diskdb, freezer, diskLayerID) - if err != nil { - log.Crit("Failed to truncate extra state histories", "err", err) - } - if pruned != 0 { - log.Warn("Truncated extra state histories", "number", pruned) - } - } + // Repair the state history, which might not be aligned with the state + // in the key-value store due to an unclean shutdown. + if err := db.repairHistory(); err != nil { + log.Crit("Failed to repair pathdb", "err", err) } // Disable database in case node is still in the initial state sync stage. if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly { @@ -211,6 +177,55 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database { return db } +// repairHistory truncates leftover state history objects, which may occur due +// to an unclean shutdown or other unexpected reasons. +func (db *Database) repairHistory() error { + // Open the freezer for state history. This mechanism ensures that + // only one database instance can be opened at a time to prevent + // accidental mutation. 
+ ancient, err := db.diskdb.AncientDatadir() + if err != nil { + // TODO error out if ancient store is disabled. A tons of unit tests + // disable the ancient store thus the error here will immediately fail + // all of them. Fix the tests first. + return nil + } + freezer, err := rawdb.NewStateFreezer(ancient, db.readOnly) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + db.freezer = freezer + + // Reset the entire state histories if the trie database is not initialized + // yet. This action is necessary because these state histories are not + // expected to exist without an initialized trie database. + id := db.tree.bottom().stateID() + if id == 0 { + frozen, err := db.freezer.Ancients() + if err != nil { + log.Crit("Failed to retrieve head of state history", "err", err) + } + if frozen != 0 { + err := db.freezer.Reset() + if err != nil { + log.Crit("Failed to reset state histories", "err", err) + } + log.Info("Truncated extraneous state history") + } + return nil + } + // Truncate the extra state histories above in freezer in case it's not + // aligned with the disk layer. It might happen after a unclean shutdown. + pruned, err := truncateFromHead(db.diskdb, db.freezer, id) + if err != nil { + log.Crit("Failed to truncate extra state histories", "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra state histories", "number", pruned) + } + return nil +} + // Update adds a new layer into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). Apart // from that this function will flatten the extra diff layers at bottom into disk @@ -292,8 +307,10 @@ func (db *Database) Enable(root common.Hash) error { } // Ensure the provided state root matches the stored one. 
root = types.TrieRootHash(root) - _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil) - stored = types.TrieRootHash(stored) + stored := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + stored = crypto.Keccak256Hash(blob) + } if stored != root { return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root) } @@ -466,11 +483,6 @@ func (db *Database) SetBufferSize(size int) error { return db.tree.bottom().setBufferSize(db.bufferSize) } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.PathScheme -} - // modifyAllowed returns the indicator if mutation is allowed. This function // assumes the db.lock is already held. func (db *Database) modifyAllowed() error { diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 29de534589d2..7b240823154d 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -474,7 +474,7 @@ func TestDisable(t *testing.T) { tester := newTester(t, 0) defer tester.release() - _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) if err := tester.db.Disable(); err != nil { t.Fatalf("Failed to deactivate database: %v", err) } @@ -580,7 +580,7 @@ func TestCorruptedJournal(t *testing.T) { t.Errorf("Failed to journal, err: %v", err) } tester.db.Close() - _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + root := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) // Mutate the journal in disk, it should be regarded as invalid blob := rawdb.ReadTrieJournal(tester.db.diskdb) diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go index bf4c6502efbd..1e93a3f89214 100644 --- a/triedb/pathdb/difflayer_test.go +++ b/triedb/pathdb/difflayer_test.go @@ -70,10 +70,10 @@ func benchmarkSearch(b *testing.B, depth int, total int) { blob = 
testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node if npath == nil && depth == index { npath = common.CopyBytes(path) - nblob = common.CopyBytes(node.Blob) + nblob = common.CopyBytes(blob) } } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) @@ -116,7 +116,7 @@ func BenchmarkPersist(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node } return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) } @@ -154,7 +154,7 @@ func BenchmarkJournal(b *testing.B) { blob = testrand.Bytes(100) node = trienode.New(crypto.Keccak256Hash(blob), blob) ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + nodes[common.Hash{}][string(path)] = node } // TODO(rjl493456442) a non-nil state set is expected. return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index ec7c91bcacfd..964ad2ef777d 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" - "golang.org/x/crypto/sha3" ) // diskLayer is a low level persistent layer built on top of a key-value store. 
@@ -117,12 +116,12 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co dirtyMissMeter.Mark(1) // Try to retrieve the trie node from the clean memory cache + h := newHasher() + defer h.release() + key := cacheKey(owner, path) if dl.cleans != nil { if blob := dl.cleans.Get(nil, key); len(blob) > 0 { - h := newHasher() - defer h.release() - cleanHitMeter.Mark(1) cleanReadMeter.Mark(int64(len(blob))) return blob, h.hash(blob), &nodeLoc{loc: locCleanCache, depth: depth}, nil @@ -130,20 +129,18 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co cleanMissMeter.Mark(1) } // Try to retrieve the trie node from the disk. - var ( - nBlob []byte - nHash common.Hash - ) + var blob []byte if owner == (common.Hash{}) { - nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) + blob = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) } else { - nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) + blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) } - if dl.cleans != nil && len(nBlob) > 0 { - dl.cleans.Set(key, nBlob) - cleanWriteMeter.Mark(int64(len(nBlob))) + if dl.cleans != nil && len(blob) > 0 { + dl.cleans.Set(key, blob) + cleanWriteMeter.Mark(int64(len(blob))) } - return nBlob, nHash, &nodeLoc{loc: locDiskLayer, depth: depth}, nil + + return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil } // update implements the layer interface, returning a new diff layer on top @@ -303,7 +300,7 @@ func (dl *diskLayer) resetCache() { type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: crypto.NewKeccakState()} }, } func newHasher() *hasher { diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go index 7099b2b381f2..3663cbbdb9a1 100644 --- a/triedb/pathdb/history.go +++ b/triedb/pathdb/history.go @@ -472,8 
+472,8 @@ func (h *history) decode(accountData, storageData, accountIndexes, storageIndexe } // readHistory reads and decodes the state history object by the given id. -func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) { - blob := rawdb.ReadStateHistoryMeta(freezer, id) +func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) { + blob := rawdb.ReadStateHistoryMeta(reader, id) if len(blob) == 0 { return nil, fmt.Errorf("state history not found %d", id) } @@ -483,10 +483,10 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } var ( dec = history{meta: &m} - accountData = rawdb.ReadStateAccountHistory(freezer, id) - storageData = rawdb.ReadStateStorageHistory(freezer, id) - accountIndexes = rawdb.ReadStateAccountIndex(freezer, id) - storageIndexes = rawdb.ReadStateStorageIndex(freezer, id) + accountData = rawdb.ReadStateAccountHistory(reader, id) + storageData = rawdb.ReadStateStorageHistory(reader, id) + accountIndexes = rawdb.ReadStateAccountIndex(reader, id) + storageIndexes = rawdb.ReadStateStorageIndex(reader, id) ) if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { return nil, err @@ -495,7 +495,7 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) } // writeHistory persists the state history with the provided state set. -func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { +func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error { // Short circuit if state set is not available. if dl.states == nil { return errors.New("state change set is not available") @@ -509,7 +509,7 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) // Write history data into five freezer table respectively. 
- rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) historyDataBytesMeter.Mark(int64(dataSize)) historyIndexBytesMeter.Mark(int64(indexSize)) @@ -521,13 +521,13 @@ func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { // checkHistories retrieves a batch of meta objects with the specified range // and performs the callback on each item. -func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error { +func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error { for count > 0 { number := count if number > 10000 { number = 10000 // split the big read into small chunks } - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number) + blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number) if err != nil { return err } @@ -548,12 +548,12 @@ func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check // truncateFromHead removes the extra state histories from the head with the given // parameters. It returns the number of items removed from the head. 
-func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -566,7 +566,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead return 0, nil } // Load the meta objects in range [nhead+1, ohead] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead) + blobs, err := rawdb.ReadStateHistoryMetaList(store, nhead+1, ohead-nhead) if err != nil { return 0, err } @@ -581,7 +581,7 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead if err := batch.Write(); err != nil { return 0, err } - ohead, err = freezer.TruncateHead(nhead) + ohead, err = store.TruncateHead(nhead) if err != nil { return 0, err } @@ -590,12 +590,12 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead // truncateFromTail removes the extra state histories from the tail with the given // parameters. It returns the number of items removed from the tail. 
-func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) { - ohead, err := freezer.Ancients() +func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64) (int, error) { + ohead, err := store.Ancients() if err != nil { return 0, err } - otail, err := freezer.Tail() + otail, err := store.Tail() if err != nil { return 0, err } @@ -608,7 +608,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail return 0, nil } // Load the meta objects in range [otail+1, ntail] - blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail) + blobs, err := rawdb.ReadStateHistoryMetaList(store, otail+1, ntail-otail) if err != nil { return 0, err } @@ -623,7 +623,7 @@ func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail if err := batch.Write(); err != nil { return 0, err } - otail, err = freezer.TruncateTail(ntail) + otail, err = store.TruncateTail(ntail) if err != nil { return 0, err } diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go index d8a761b91689..240474da37e4 100644 --- a/triedb/pathdb/history_inspect.go +++ b/triedb/pathdb/history_inspect.go @@ -21,7 +21,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -34,7 +34,7 @@ type HistoryStats struct { } // sanitizeRange limits the given range to fit within the local history store. -func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. 
tail, err := freezer.Tail() if err != nil { @@ -60,7 +60,7 @@ func sanitizeRange(start, end uint64, freezer *rawdb.ResettableFreezer) (uint64, return first, last, nil } -func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { +func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) { var ( stats = &HistoryStats{} init = time.Now() @@ -96,7 +96,7 @@ func inspectHistory(freezer *rawdb.ResettableFreezer, start, end uint64, onHisto } // accountHistory inspects the account history within the range. -func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, start, end uint64) (*HistoryStats, error) { +func accountHistory(freezer ethdb.AncientReader, address common.Address, start, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { blob, exists := h.accounts[address] if !exists { @@ -108,7 +108,7 @@ func accountHistory(freezer *rawdb.ResettableFreezer, address common.Address, st } // storageHistory inspects the storage history within the range. -func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { +func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { slots, exists := h.storages[address] if !exists { @@ -124,7 +124,7 @@ func storageHistory(freezer *rawdb.ResettableFreezer, address common.Address, sl } // historyRange returns the block number range of local state histories. 
-func historyRange(freezer *rawdb.ResettableFreezer) (uint64, uint64, error) { +func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) { // Load the id of the first history object in local store. tail, err := freezer.Tail() if err != nil { diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go index 81ac768acdc6..4114aa118532 100644 --- a/triedb/pathdb/history_test.go +++ b/triedb/pathdb/history_test.go @@ -102,7 +102,7 @@ func TestEncodeDecodeHistory(t *testing.T) { } } -func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) { +func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) { blob := rawdb.ReadStateHistoryMeta(freezer, id) if exist && len(blob) == 0 { t.Fatalf("Failed to load trie history, %d", id) @@ -118,7 +118,7 @@ func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.Resettab } } -func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) { +func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) { for i, j := from, 0; i <= to; i, j = i+1, j+1 { checkHistory(t, db, freezer, i, roots[j], exist) } @@ -129,7 +129,7 @@ func TestTruncateHeadHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -157,7 +157,7 @@ func TestTruncateTailHistory(t *testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -200,7 +200,7 @@ func TestTruncateTailHistories(t 
*testing.T) { roots []common.Hash hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) ) defer freezer.Close() @@ -228,7 +228,7 @@ func TestTruncateOutOfRange(t *testing.T) { var ( hs = makeHistories(10) db = rawdb.NewMemoryDatabase() - freezer, _ = openFreezer(t.TempDir(), false) + freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false) ) defer freezer.Close() @@ -268,11 +268,6 @@ func TestTruncateOutOfRange(t *testing.T) { } } -// openFreezer initializes the freezer instance for storing state histories. -func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) { - return rawdb.NewStateFreezer(datadir, readOnly) -} - func compareSet[k comparable](a, b map[k][]byte) bool { if len(a) != len(b) { return false diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 3a0b7ebae273..1740ec593511 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -120,9 +120,10 @@ func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { // loadLayers loads a pre-existing state layer backed by a key-value store. func (db *Database) loadLayers() layer { // Retrieve the root node of persistent state. - _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil) - root = types.TrieRootHash(root) - + var root = types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + root = crypto.Keccak256Hash(blob) + } // Load the layers by resolving the journal head, err := db.loadJournal(root) if err == nil { @@ -361,14 +362,13 @@ func (db *Database) Journal(root common.Hash) error { if err := rlp.Encode(journal, journalVersion); err != nil { return err } - // The stored state in disk might be empty, convert the - // root to emptyRoot in this case. 
- _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil) - diskroot = types.TrieRootHash(diskroot) - // Secondly write out the state root in disk, ensure all layers // on top are continuous with disk. - if err := rlp.Encode(journal, diskroot); err != nil { + diskRoot := types.EmptyRootHash + if blob := rawdb.ReadAccountTrieNode(db.diskdb, nil); len(blob) > 0 { + diskRoot = crypto.Keccak256Hash(blob) + } + if err := rlp.Encode(journal, diskRoot); err != nil { return err } // Finally write out the journal of each layer in reverse order. diff --git a/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go index 4a13fcc44e8c..ff0948410059 100644 --- a/triedb/pathdb/nodebuffer.go +++ b/triedb/pathdb/nodebuffer.go @@ -17,6 +17,7 @@ package pathdb import ( + "bytes" "fmt" "time" @@ -89,7 +90,7 @@ func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *no // The nodes belong to original diff layer are still accessible even // after merging, thus the ownership of nodes map should still belong // to original layer and any mutation on it should be prevented. - current = make(map[string]*trienode.Node) + current = make(map[string]*trienode.Node, len(subset)) for path, n := range subset { current[path] = n delta += int64(len(n.Blob) + len(path)) @@ -148,14 +149,14 @@ func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[s // // In case of database rollback, don't panic if this "clean" // node occurs which is not present in buffer. - var nhash common.Hash + var blob []byte if owner == (common.Hash{}) { - _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + blob = rawdb.ReadAccountTrieNode(db, []byte(path)) } else { - _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) } // Ignore the clean node in the case described above. 
- if nhash == n.Hash { + if bytes.Equal(blob, n.Blob) { continue } panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) From 45ec501b4b4e98e5a5b1b442af4cca9953568ac2 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 4 Jun 2024 19:40:06 +0300 Subject: [PATCH 16/73] initial Process test (#7) initial test for the full "Process" call, to handle mixed legacy and AA transactions --- tests/rip7560/process_test.go | 112 ++++++++++++++++++++++++++++++ tests/rip7560/rip7560TestUtils.go | 8 +++ tests/rip7560/validation_test.go | 4 +- 3 files changed, 121 insertions(+), 3 deletions(-) create mode 100644 tests/rip7560/process_test.go diff --git a/tests/rip7560/process_test.go b/tests/rip7560/process_test.go new file mode 100644 index 000000000000..4d97c59dd759 --- /dev/null +++ b/tests/rip7560/process_test.go @@ -0,0 +1,112 @@ +// attempt to test Process, and how 7560 transaction affect normal TXs +package rip7560 + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/tests" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/assert" + "math/big" + "testing" +) + +/** +Test that "Process" of 7560 transactions doesn't alter legacy transaction processing. +the idea: +1. Run "Process" with a set of transactions L1 [AA1..AAn] L2 +2. Run "Process" just with the lagacy transactions L1,L2 +3. if AA transactions revert validation - make sure the legacy processing is intact. +4. 
if AA transactions are executed, make sure the needed state changes of the legacy transactions is intact +*/ + +const addr1 = "f39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +const privKey1 = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + +const addr2 = "70997970C51812dc3A010C7d01b50e0d17dc79C8" +const privKey2 = "59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + +// initial minimal test that a valid AATX can be processed in a block +func TestProcess1(t *testing.T) { + + Sender := common.HexToAddress(DEFAULT_SENDER) + runProcess(newTestContextBuilder(t). + withAccount(addr1, 100000000000000). + withCode(DEFAULT_SENDER, createAccountCode(), 1000000000000000000). + build(), []*types.Rip7560AccountAbstractionTx{ + { + Sender: &Sender, + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + Data: []byte{1, 2, 3}, + }, + }) +} + +// run a set of AA transactions, with a legacy TXs before and after. +func runProcess(t *testContext, aatxs []*types.Rip7560AccountAbstractionTx) error { + var db ethdb.Database = rawdb.NewMemoryDatabase() + var state = tests.MakePreState(db, t.genesisAlloc, false, rawdb.HashScheme) + defer state.Close() + + cacheConfig := &core.CacheConfig{} + chainOverrides := core.ChainOverrides{} + engine := beacon.New(ethash.NewFaker()) + lookupLimit := uint64(0) + blockchain, err := core.NewBlockChain(db, cacheConfig, t.genesis, &chainOverrides, engine, + vm.Config{}, shouldPreserve, &lookupLimit) + if err != nil { + t.t.Fatalf("NewBlockChain failed: %v", err) + } + + signer := types.MakeSigner(blockchain.Config(), new(big.Int), 0) + key1, _ := crypto.HexToECDSA(privKey1) + if crypto.PubkeyToAddress(key1.PublicKey) != common.HexToAddress(addr1) { + t.t.Fatalf("sanity: addr1 doesn't match privKey1: should be %s", crypto.PubkeyToAddress(key1.PublicKey)) + } + //addr1 := crypto.PubkeyToAddress(key1.PublicKey) + + key2, _ := crypto.HexToECDSA(privKey2) + addr2 := crypto.PubkeyToAddress(key2.PublicKey) + + 
tx1, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: 0, + GasFeeCap: big.NewInt(1000000000), + Value: big.NewInt(1), + Gas: 30000, + To: &addr2, + }), signer, key1) + + tx3, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: 1, + GasFeeCap: big.NewInt(1000000000), + Value: big.NewInt(2), + Gas: 30000, + To: &addr2, + }), signer, key1) + + txs := []*types.Transaction{tx1} + for _, aatx := range aatxs { + txs = append(txs, types.NewTx(aatx)) + } + txs = append(txs, tx3) + + b := types.NewBlock(blockchain.CurrentBlock(), txs, nil, nil, trie.NewStackTrie(nil)) + _, _, _, err = blockchain.Processor().Process(b, state.StateDB, vm.Config{}) + if err != nil { + return err + } + assert.Equal(t.t, "0x3", state.StateDB.GetBalance(addr2).Hex(), "failed to process pre/post legacy transactions") + return nil +} + +func shouldPreserve(*types.Header) bool { + return false +} diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 9a330cbfc3df..d891d4b2237a 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -109,6 +109,14 @@ func returnData(data []byte) []byte { return ret } +// create bytecode for account +func createAccountCode() []byte { + magic := big.NewInt(0xbf45c166) + magic.Lsh(magic, 256-32) + + return returnData(magic.Bytes()) +} + // create EVM code from OpCode, byte and []bytes func createCode(items ...interface{}) []byte { var buffer bytes.Buffer diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 2ab9ca649031..357fc3a8e897 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -24,10 +24,8 @@ func TestValidation_OOG(t *testing.T) { } func TestValidation_ok(t *testing.T) { - magic := big.NewInt(0xbf45c166) - magic.Lsh(magic, 256-32) - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 0), types.Rip7560AccountAbstractionTx{ + 
validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "") From 8064306e4b7aa6d7a091d235ec7acbd3c47f6b00 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 4 Jun 2024 21:25:16 +0300 Subject: [PATCH 17/73] remove stub message (#8) * remove stub message was created just to create the vm.Txontext * update --- core/state_processor_rip7560.go | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index b5a5ae483fee..d05efdc9a535 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -114,10 +114,11 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St } func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { - stubMsg := prepareStubMessage(tx, chainConfig) blockContext := NewEVMBlockContext(header, bc, author) - txContext := NewEVMTxContext(stubMsg) - txContext.Origin = *tx.Rip7560TransactionData().Sender + txContext := vm.TxContext{ + Origin: *tx.Rip7560TransactionData().Sender, + GasPrice: tx.GasFeeCap(), + } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) /*** Deployer Frame ***/ deployerMsg := prepareDeployerMessage(tx, chainConfig) @@ -266,20 +267,6 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase } return receipt, err } -func prepareStubMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig) *Message { - tx := baseTx.Rip7560TransactionData() - return &Message{ - From: chainConfig.EntryPointAddress, - Value: big.NewInt(0), - GasLimit: 100000, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: 
tx.GasTipCap, - AccessList: make(types.AccessList, 0), - SkipAccountChecks: true, - IsRip7560Frame: true, - } -} func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { tx := baseTx.Rip7560TransactionData() From 94156da3569cbe654fab462e212166e1c0e0f6bc Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 4 Jun 2024 20:30:51 +0200 Subject: [PATCH 18/73] WIP: create the 'debug_traceRip7560Validation' API --- eth/tracers/api.go | 3 + eth/tracers/api_tracing_rip7560.go | 141 +++++++++++++++++++++++ eth/tracers/native/rip7560_validation.go | 70 +++++++++++ internal/ethapi/transaction_args.go | 12 +- 4 files changed, 220 insertions(+), 6 deletions(-) create mode 100644 eth/tracers/api_tracing_rip7560.go create mode 100644 eth/tracers/native/rip7560_validation.go diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 51b55ffdbb1b..e04f503aeba6 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1022,6 +1022,9 @@ func APIs(backend Backend) []rpc.API { { Namespace: "debug", Service: NewAPI(backend), + }, { + Namespace: "debug", + Service: NewRip7560API(backend), }, } } diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go new file mode 100644 index 000000000000..31d1b0a5c48e --- /dev/null +++ b/eth/tracers/api_tracing_rip7560.go @@ -0,0 +1,141 @@ +package tracers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/rpc" + "log" + "math/big" + "time" +) + +// Rip7560API is the collection of tracing APIs exposed over the private debugging endpoint. 
+type Rip7560API struct { + backend Backend +} + +func NewRip7560API(backend Backend) *Rip7560API { + return &Rip7560API{backend: backend} +} + +// TraceRip7560Validation mostly copied from 'tracers/api.go' file +func (api *Rip7560API) TraceRip7560Validation( + ctx context.Context, + args ethapi.TransactionArgs, + blockNrOrHash rpc.BlockNumberOrHash, + config *TraceCallConfig, +) (interface{}, error) { + number, _ := blockNrOrHash.Number() + block, err := api.blockByNumber(ctx, number) + if err != nil { + return nil, err + } + reexec := defaultTraceReexec + statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) + if err != nil { + return nil, err + } + defer release() + + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + if err := args.CallDefaults(api.backend.RPCGasCap(), vmctx.BaseFee, api.backend.ChainConfig().ChainID); err != nil { + return nil, err + } + var ( + msg = args.ToMessage(vmctx.BaseFee) + tx = args.ToTransaction() + traceConfig *TraceConfig + ) + if config != nil { + traceConfig = &config.TraceConfig + } + traceResult, err := api.traceTx(ctx, tx, msg, new(Context), vmctx, statedb, traceConfig) + if err != nil { + return nil, err + } + log.Println("TraceRip7560Validation result") + log.Println(string(traceResult.(json.RawMessage))) + return traceResult, err +} + +//////// copy-pasted code + +// blockByNumber is the wrapper of the chain access function offered by the backend. +// It will return an error if the block is not found. +func (api *Rip7560API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + block, err := api.backend.BlockByNumber(ctx, number) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block #%d not found", number) + } + return block, nil +} + +// chainContext constructs the context reader which is used by the evm for reading +// the necessary chain context. 
+func (api *Rip7560API) chainContext(ctx context.Context) core.ChainContext { + return ethapi.NewChainContext(ctx, api.backend) +} + +func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, message *core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { + var ( + tracer *Tracer + err error + timeout = defaultTraceTimeout + usedGas uint64 + ) + if config == nil { + config = &TraceConfig{} + } + // Default tracer is the struct logger + //if config.Tracer == nil { + // logger := logger.NewStructLogger(config.Config) + // tracer = &Tracer{ + // Hooks: logger.Hooks(), + // GetResult: logger.GetResult, + // Stop: logger.Stop, + // } + //} else { + tracer, err = DefaultDirectory.New("rip7560Validation", txctx, config.TracerConfig) + // if err != nil { + // return nil, err + // } + //} + vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: big.NewInt(0)}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) + statedb.SetLogger(tracer.Hooks) + + // Define a meaningful timeout of a single transaction trace + if config.Timeout != nil { + if timeout, err = time.ParseDuration(*config.Timeout); err != nil { + return nil, err + } + } + deadlineCtx, cancel := context.WithTimeout(ctx, timeout) + go func() { + <-deadlineCtx.Done() + if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { + tracer.Stop(errors.New("execution timeout")) + // Stop evm execution. Note cancellation is not necessarily immediate. 
+ vmenv.Cancel() + } + }() + defer cancel() + + // Call Prepare to clear out the statedb access list + statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) + message.IsRip7560Frame = true + _, err = core.ApplyTransactionWithEVM(message, api.backend.ChainConfig(), new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, vmenv) + if err != nil { + return nil, fmt.Errorf("tracing failed: %w", err) + } + return tracer.GetResult() +} diff --git a/eth/tracers/native/rip7560_validation.go b/eth/tracers/native/rip7560_validation.go new file mode 100644 index 000000000000..336fb8edee6d --- /dev/null +++ b/eth/tracers/native/rip7560_validation.go @@ -0,0 +1,70 @@ +package native + +import ( + "encoding/json" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" +) + +func init() { + tracers.DefaultDirectory.Register("rip7560Validation", newRip7560Tracer, false) +} + +func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { + var config prestateTracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, err + } + } + t := &rip7560ValidationTracer{ + TraceResults: make([]stateMap, 0), + UsedOpcodes: make([]map[byte]bool, 0), + Created: make([]map[common.Address]bool, 0), + Deleted: make([]map[common.Address]bool, 0), + } + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnOpcode: t.OnOpcode, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil +} + +// Array fields contain of all access details of all validation frames +type rip7560ValidationTracer struct { + env *tracing.VMContext + TraceResults []stateMap `json:"traceResults"` + UsedOpcodes []map[byte]bool `json:"usedOpcodes"` + Created []map[common.Address]bool 
`json:"created"` + Deleted []map[common.Address]bool `json:"deleted"` + // todo + //interrupt atomic.Bool // Atomic flag to signal execution interruption + //reason error // Textual reason for the interruption +} + +func (t *rip7560ValidationTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + +} + +func (t *rip7560ValidationTracer) OnTxEnd(receipt *types.Receipt, err error) { +} + +func (t *rip7560ValidationTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + fmt.Printf("%s %d %d", vm.OpCode(opcode).String(), cost, depth) +} + +func (t *rip7560ValidationTracer) GetResult() (json.RawMessage, error) { + jsonResult, err := json.MarshalIndent(*t, "", " ") + return jsonResult, err +} + +func (t *rip7560ValidationTracer) Stop(err error) { +} diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index ae935bdef96e..5683b0a2de71 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -79,13 +79,13 @@ type TransactionArgs struct { Sender *common.Address `json:"sender"` Signature *hexutil.Bytes Paymaster *common.Address `json:"paymaster,omitempty"` - PaymasterData *hexutil.Bytes `json:"paymasterData"` - Deployer *common.Address `json:"deployer,omitempty"` - DeployerData *hexutil.Bytes + PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` + Deployer *common.Address `json:"factory,omitempty"` + DeployerData *hexutil.Bytes `json:"factoryData,omitempty"` BuilderFee *hexutil.Big - ValidationGas *hexutil.Uint64 - PaymasterGas *hexutil.Uint64 - PostOpGas *hexutil.Uint64 + ValidationGas *hexutil.Uint64 `json:"verificationGasLimit"` + PaymasterGas *hexutil.Uint64 `json:"paymasterVerificationGasLimit"` + PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit"` } // from retrieves the transaction sender address. 
From 99b5f3c08f855fd8b8d00f505e3d4a8bf03ef72e Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 5 Jun 2024 22:44:16 +0200 Subject: [PATCH 19/73] WIP: Execute 'ApplyRip7560ValidationPhases' with 'rip7560Validation' tracer --- eth/tracers/api_tracing_rip7560.go | 20 ++++++++++++++------ eth/tracers/native/rip7560_validation.go | 19 +++++++++++++------ internal/ethapi/transaction_args.go | 4 ++-- 3 files changed, 29 insertions(+), 14 deletions(-) diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go index 31d1b0a5c48e..f57e88889c6f 100644 --- a/eth/tracers/api_tracing_rip7560.go +++ b/eth/tracers/api_tracing_rip7560.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -49,14 +50,14 @@ func (api *Rip7560API) TraceRip7560Validation( return nil, err } var ( - msg = args.ToMessage(vmctx.BaseFee) + //msg = args.ToMessage(vmctx.BaseFee) tx = args.ToTransaction() traceConfig *TraceConfig ) if config != nil { traceConfig = &config.TraceConfig } - traceResult, err := api.traceTx(ctx, tx, msg, new(Context), vmctx, statedb, traceConfig) + traceResult, err := api.traceTx(ctx, tx, new(Context), block, vmctx, statedb, traceConfig) if err != nil { return nil, err } @@ -86,12 +87,12 @@ func (api *Rip7560API) chainContext(ctx context.Context) core.ChainContext { return ethapi.NewChainContext(ctx, api.backend) } -func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, message *core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx *Context, block *types.Block, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { var ( tracer *Tracer err error timeout = 
defaultTraceTimeout - usedGas uint64 + //usedGas uint64 ) if config == nil { config = &TraceConfig{} @@ -132,8 +133,15 @@ func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, messa // Call Prepare to clear out the statedb access list statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) - message.IsRip7560Frame = true - _, err = core.ApplyTransactionWithEVM(message, api.backend.ChainConfig(), new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, vmenv) + gp := new(core.GasPool).AddGas(10000000) + + // TODO: this is added to allow our bundler checking the 'TraceValidation' API is supported on Geth + if tx.Rip7560TransactionData().Sender.Cmp(common.HexToAddress("0x0000000000000000000000000000000000000000")) == 0 { + return tracer.GetResult() + } + + _, err = core.ApplyRip7560ValidationPhases(api.backend.ChainConfig(), api.chainContext(ctx), nil, gp, statedb, block.Header(), tx, vmenv.Config) + //_, err = core.ApplyTransactionWithEVM(message, api.backend.ChainConfig(), new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, vmenv) if err != nil { return nil, fmt.Errorf("tracing failed: %w", err) } diff --git a/eth/tracers/native/rip7560_validation.go b/eth/tracers/native/rip7560_validation.go index 336fb8edee6d..459e3c50fff0 100644 --- a/eth/tracers/native/rip7560_validation.go +++ b/eth/tracers/native/rip7560_validation.go @@ -14,6 +14,8 @@ func init() { tracers.DefaultDirectory.Register("rip7560Validation", newRip7560Tracer, false) } +const ValidationFramesMaxCount = 3 + func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { var config prestateTracerConfig if cfg != nil { @@ -22,10 +24,10 @@ func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trace } } t := &rip7560ValidationTracer{ - TraceResults: make([]stateMap, 0), - UsedOpcodes: make([]map[byte]bool, 0), - Created: 
make([]map[common.Address]bool, 0), - Deleted: make([]map[common.Address]bool, 0), + TraceResults: make([]stateMap, ValidationFramesMaxCount), + UsedOpcodes: make([]map[string]bool, ValidationFramesMaxCount), + Created: make([]map[common.Address]bool, ValidationFramesMaxCount), + Deleted: make([]map[common.Address]bool, ValidationFramesMaxCount), } return &tracers.Tracer{ Hooks: &tracing.Hooks{ @@ -42,7 +44,7 @@ func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trace type rip7560ValidationTracer struct { env *tracing.VMContext TraceResults []stateMap `json:"traceResults"` - UsedOpcodes []map[byte]bool `json:"usedOpcodes"` + UsedOpcodes []map[string]bool `json:"usedOpcodes"` Created []map[common.Address]bool `json:"created"` Deleted []map[common.Address]bool `json:"deleted"` // todo @@ -58,7 +60,12 @@ func (t *rip7560ValidationTracer) OnTxEnd(receipt *types.Receipt, err error) { } func (t *rip7560ValidationTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { - fmt.Printf("%s %d %d", vm.OpCode(opcode).String(), cost, depth) + opcodeName := vm.OpCode(opcode).String() + fmt.Printf("%s %d %d\n", opcodeName, cost, depth) + if t.UsedOpcodes[0] == nil { + t.UsedOpcodes[0] = make(map[string]bool) + } + t.UsedOpcodes[0][opcodeName] = true } func (t *rip7560ValidationTracer) GetResult() (json.RawMessage, error) { diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 5683b0a2de71..06b5ae60894a 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -80,8 +80,8 @@ type TransactionArgs struct { Signature *hexutil.Bytes Paymaster *common.Address `json:"paymaster,omitempty"` PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` - Deployer *common.Address `json:"factory,omitempty"` - DeployerData *hexutil.Bytes `json:"factoryData,omitempty"` + Deployer *common.Address `json:"deployer,omitempty"` + 
DeployerData *hexutil.Bytes `json:"deployerData,omitempty"` BuilderFee *hexutil.Big ValidationGas *hexutil.Uint64 `json:"verificationGasLimit"` PaymasterGas *hexutil.Uint64 `json:"paymasterVerificationGasLimit"` From 12089354ca8f651796580fc563d2ceac15ac52bd Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 25 Jun 2024 10:54:13 +0300 Subject: [PATCH 20/73] refactor dev genesis (#10) --- tests/rip7560/rip7560TestUtils.go | 14 +------------- tests/rip7560/validation_test.go | 2 +- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index d891d4b2237a..258d8ed60904 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/params" "github.com/status-im/keycard-go/hexutils" "math/big" "testing" @@ -21,7 +20,6 @@ type testContext struct { genesisAlloc types.GenesisAlloc t *testing.T chainContext *ethapi.ChainContext - chainConfig *params.ChainConfig gaspool *core.GasPool genesis *core.Genesis genesisBlock *types.Block @@ -33,29 +31,20 @@ func newTestContext(t *testing.T) *testContext { type testContextBuilder struct { t *testing.T - chainConfig *params.ChainConfig genesisAlloc types.GenesisAlloc } func newTestContextBuilder(t *testing.T) *testContextBuilder { genesisAlloc := types.GenesisAlloc{} - chainConfig := params.AllDevChainProtocolChanges - // probably bug in geth.. 
- chainConfig.PragueTime = chainConfig.CancunTime - return &testContextBuilder{ t: t, - chainConfig: chainConfig, genesisAlloc: genesisAlloc, } } func (tb *testContextBuilder) build() *testContext { - genesis := &core.Genesis{ - Config: params.AllDevChainProtocolChanges, - Alloc: tb.genesisAlloc, - } + genesis := core.DeveloperGenesisBlock(10_000_000, &common.Address{}) genesisBlock := genesis.ToBlock() gaspool := new(core.GasPool).AddGas(genesisBlock.GasLimit()) @@ -66,7 +55,6 @@ func (tb *testContextBuilder) build() *testContext { t: tb.t, genesisAlloc: tb.genesisAlloc, chainContext: ethapi.NewChainContext(context.TODO(), backend), - chainConfig: tb.chainConfig, genesis: genesis, genesisBlock: genesisBlock, gaspool: gaspool, diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 357fc3a8e897..4a0b47f6b8ab 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -69,7 +69,7 @@ func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionT var state = tests.MakePreState(rawdb.NewMemoryDatabase(), t.genesisAlloc, false, rawdb.HashScheme) defer state.Close() - _, err := core.ApplyRip7560ValidationPhases(t.chainConfig, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) + _, err := core.ApplyRip7560ValidationPhases(t.genesis.Config, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) // err string or empty if nil errStr := "" if err != nil { From e4914004fabad48b2f06df657be464df4cb75249 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 25 Jun 2024 12:09:45 +0300 Subject: [PATCH 21/73] check error --- tests/rip7560/process_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/rip7560/process_test.go b/tests/rip7560/process_test.go index 30520fdded53..2e83d058618f 100644 --- a/tests/rip7560/process_test.go +++ b/tests/rip7560/process_test.go @@ -37,7 +37,7 
@@ const privKey2 = "59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b7869 func TestProcess1(t *testing.T) { Sender := common.HexToAddress(DEFAULT_SENDER) - runProcess(newTestContextBuilder(t). + err := runProcess(newTestContextBuilder(t). withAccount(addr1, 100000000000000). withCode(DEFAULT_SENDER, createAccountCode(), 1000000000000000000). build(), []*types.Rip7560AccountAbstractionTx{ @@ -48,6 +48,9 @@ func TestProcess1(t *testing.T) { Data: []byte{1, 2, 3}, }, }) + if err != nil { + panic(err) + } } // run a set of AA transactions, with a legacy TXs before and after. From ca86ce62a7e9db4d24ffa912b9201c34bee16874 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sun, 30 Jun 2024 14:38:27 +0300 Subject: [PATCH 22/73] update tests, match return values to RIP --- core/state_processor_rip7560.go | 66 +++++++++++-------- tests/rip7560/process_test.go | 4 +- tests/rip7560/rip7560TestUtils.go | 13 +--- tests/rip7560/validation_test.go | 101 +++++++++++++++++++++++++----- 4 files changed, 128 insertions(+), 56 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index d05efdc9a535..23bced3307b7 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -1,7 +1,6 @@ package core import ( - "encoding/binary" "errors" "fmt" "github.com/ethereum/go-ethereum/accounts/abi" @@ -15,6 +14,36 @@ import ( "strings" ) +const MAGIC_VALUE_SENDER = uint64(0xbf45c166) +const MAGIC_VALUE_PAYMASTER = uint64(0xe0e6183a) +const MAGIC_VALUE_SIGFAIL = uint64(0x31665494) +const PAYMASTER_MAX_CONTEXT_SIZE = 65536 + +func PackValidationData(authorizerMagic uint64, validUntil, validAfter uint64) []byte { + + t := new(big.Int).SetUint64(uint64(validAfter)) + t = t.Lsh(t, 48).Add(t, new(big.Int).SetUint64(validUntil&0xffffff)) + t = t.Lsh(t, 160).Add(t, new(big.Int).SetUint64(uint64(authorizerMagic))) + return common.LeftPadBytes(t.Bytes(), 32) +} + +func UnpackValidationData(validationData []byte) (authorizerMagic uint64, 
validUntil, validAfter uint64) { + + t := new(big.Int).SetBytes(validationData) + authorizerMagic = t.Uint64() + validUntil = t.Rsh(t, 160).Uint64() & 0xffffff + validAfter = t.Rsh(t, 48).Uint64() + return +} + +func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte) { + validationData = paymasterValidationReturn[0:32] + //2nd bytes32 is ignored (its an offset value) + contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) + context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] + return +} + type ValidationPhaseResult struct { TxIndex int Tx *types.Transaction @@ -399,45 +428,30 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC } func validateAccountReturnData(data []byte) (uint64, uint64, error) { - MAGIC_VALUE_SENDER := uint32(0xbf45c166) if len(data) != 32 { return 0, 0, errors.New("invalid account return data length") } - magicExpected := binary.BigEndian.Uint32(data[:4]) + magicExpected, validUntil, validAfter := UnpackValidationData(data) + //todo: we check first 8 bytes of the 20-byte address (the rest is expected to be zeros) if magicExpected != MAGIC_VALUE_SENDER { + if magicExpected == MAGIC_VALUE_SIGFAIL { + return 0, 0, errors.New("account signature error") + } return 0, 0, errors.New("account did not return correct MAGIC_VALUE") } - validAfter := binary.BigEndian.Uint64(data[4:12]) - validUntil := binary.BigEndian.Uint64(data[12:20]) return validAfter, validUntil, nil } -func validatePaymasterReturnData(data []byte) ([]byte, uint64, uint64, error) { - MAGIC_VALUE_PAYMASTER := uint32(0xe0e6183a) - if len(data) < 4 { +func validatePaymasterReturnData(data []byte) (context []byte, validAfter, validUntil uint64, error error) { + if len(data) < 32 { return nil, 0, 0, errors.New("invalid paymaster return data length") } - magicExpected := binary.BigEndian.Uint32(data[:4]) + validationData, context := UnpackPaymasterValidationReturn(data) + 
magicExpected, validAfter, validUntil := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } - - jsondata := `[ - {"type":"function","name":"validatePaymasterTransaction","outputs": [{"name": "context","type": "bytes"},{"name": "validUntil","type": "uint256"},{"name": "validAfter","type": "uint256"}]} - ]` - validatePaymasterTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) - if err != nil { - // todo: wrap error message - return nil, 0, 0, err - } - decodedPmReturnData, err := validatePaymasterTransactionAbi.Unpack("validatePaymasterTransaction", data[4:]) - if err != nil { - return nil, 0, 0, err - } - context := decodedPmReturnData[0].([]byte) - validAfter := decodedPmReturnData[1].(*big.Int) - validUntil := decodedPmReturnData[2].(*big.Int) - return context, validAfter.Uint64(), validUntil.Uint64(), nil + return context, validAfter, validUntil, nil } func validateValidityTimeRange(time uint64, validAfter uint64, validUntil uint64) error { diff --git a/tests/rip7560/process_test.go b/tests/rip7560/process_test.go index 2e83d058618f..c3973aaa7b09 100644 --- a/tests/rip7560/process_test.go +++ b/tests/rip7560/process_test.go @@ -48,9 +48,7 @@ func TestProcess1(t *testing.T) { Data: []byte{1, 2, 3}, }, }) - if err != nil { - panic(err) - } + assert.NoError(t, err) } // run a set of AA transactions, with a legacy TXs before and after. 
diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 258d8ed60904..710fc51897f6 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -15,6 +15,7 @@ import ( ) const DEFAULT_SENDER = "0x1111111111222222222233333333334444444444" +const DEFAULT_BALANCE = 1 << 62 type testContext struct { genesisAlloc types.GenesisAlloc @@ -83,26 +84,18 @@ func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) // generate the code to return the given byte array (up to 32 bytes) func returnData(data []byte) []byte { - //couldn't get geth to support PUSH0 ... datalen := len(data) - if datalen == 0 { - data = []byte{0} - } if datalen > 32 { panic(fmt.Errorf("data length is too big %v", data)) } PUSHn := byte(int(vm.PUSH0) + datalen) - ret := createCode(PUSHn, data, vm.PUSH1, 0, vm.MSTORE, vm.PUSH1, 32, vm.PUSH1, 0, vm.RETURN) + ret := createCode(PUSHn, data, vm.PUSH0, vm.MSTORE, vm.PUSH1, datalen, vm.PUSH1, 0, vm.RETURN) return ret } -// create bytecode for account func createAccountCode() []byte { - magic := big.NewInt(0xbf45c166) - magic.Lsh(magic, 256-32) - - return returnData(magic.Bytes()) + return returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) } // create EVM code from OpCode, byte and []bytes diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 4a0b47f6b8ab..4ccc3996762a 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -13,51 +13,111 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -func TestValidation_OOG(t *testing.T) { +func TestPackValidationData(t *testing.T) { + //assert.Equal(t, make([]byte, 32), packValidationData(0, 0, 0)) + //assert.Equal(t, new(big.Int).SetInt64(0x1234).Text(16), new(big.Int).SetBytes(packValidationData(0x1234, 0, 0)).Text(16)) + // ------------------------------------ bbbbbbbbbbbb-aaaaaaaaaaa-mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm + packed, _ := 
new(big.Int).SetString("0000000000020000000000010000000000000000000000000000000000001234", 16) + assert.Equal(t, packed.Text(16), new(big.Int).SetBytes(core.PackValidationData(0x1234, 1, 2)).Text(16)) +} + +func TestUnpackValidationData(t *testing.T) { + packed := core.PackValidationData(0xdead, 0xcafe, 0xface) + magic, until, after := core.UnpackValidationData(packed) + assert.Equal(t, []uint64{0xdead, 0xcafe, 0xface}, []uint64{magic, until, after}) +} + +func TestValidationFailure_OOG(t *testing.T) { magic := big.NewInt(0xbf45c166) magic.Lsh(magic, 256-32) - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 0), types.Rip7560AccountAbstractionTx{ + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1), GasFeeCap: big.NewInt(1000000000), }, "out of gas") } +func TestValidationFailure_no_balance(t *testing.T) { + magic := big.NewInt(0xbf45c166) + magic.Lsh(magic, 256-32) + + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 1), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1), + GasFeeCap: big.NewInt(1000000000), + }, "insufficient funds for gas * price + value: address 0x1111111111222222222233333333334444444444 have 1 want 1000000000") +} + +func TestValidationFailure_sigerror(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "account signature error") +} + func TestValidation_ok(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0), types.Rip7560AccountAbstractionTx{ + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), 
types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), - }, "") + }, "ok") } -func TestValidation_account_revert(t *testing.T) { +func TestValidation_ok_paid(t *testing.T) { + + aatx := types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + } + tb := newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE) + validatePhase(tb, aatx, "ok") + + maxCost := new(big.Int).SetUint64(aatx.ValidationGas + aatx.PaymasterGas + aatx.Gas) + maxCost.Mul(maxCost, aatx.GasFeeCap) +} + +func TestValidationFailure_account_revert(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "execution reverted") +} + +func TestValidationFailure_account_out_of_range(t *testing.T) { validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - createCode(vm.PUSH1, 0, vm.DUP1, vm.REVERT), 0), types.Rip7560AccountAbstractionTx{ + createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "execution reverted") } -func TestValidation_account_no_return_value(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, []byte{ - byte(vm.PUSH1), 0, byte(vm.DUP1), byte(vm.RETURN), - }, 0), types.Rip7560AccountAbstractionTx{ +func TestValidationFailure_account_wrong_return_length(t *testing.T) { + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + returnData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") } -func TestValidation_account_wrong_return_value(t *testing.T) { +func 
TestValidationFailure_account_no_return_value(t *testing.T) { validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(createCode(1)), - 0), types.Rip7560AccountAbstractionTx{ + returnData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "invalid account return data length") +} + +func TestValidationFailure_account_wrong_return_value(t *testing.T) { + // create buffer of 32 byte array + validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + returnData(make([]byte, 32)), + DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "account did not return correct MAGIC_VALUE") } -func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionTx, expectedErr string) { +func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionTx, expectedErr string) *core.ValidationPhaseResult { t := tb.build() if aatx.Sender == nil { //pre-deployed sender account @@ -69,13 +129,20 @@ func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionT var state = tests.MakePreState(rawdb.NewMemoryDatabase(), t.genesisAlloc, false, rawdb.HashScheme) defer state.Close() - _, err := core.ApplyRip7560ValidationPhases(t.genesis.Config, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) - // err string or empty if nil - errStr := "" + state.StateDB.SetTxContext(tx.Hash(), 0) + err := core.BuyGasRip7560Transaction(&aatx, state.StateDB) + + var res *core.ValidationPhaseResult + if err == nil { + res, err = core.ApplyRip7560ValidationPhases(t.genesis.Config, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) + // err string or empty if nil + } + errStr := "ok" if err != nil { errStr = err.Error() } assert.Equal(t.t, expectedErr, errStr) + return res } 
//test failure on non-rip7560 From 71e6d5ad1d25142b60a16e91eb1631d310d8952b Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sun, 30 Jun 2024 15:41:10 +0300 Subject: [PATCH 23/73] refactor: test HandleRip7560Transactions (instead of individual validation methods) --- tests/rip7560/validation_test.go | 34 +++++++++++--------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 4ccc3996762a..cddb45a7fc55 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -28,27 +28,23 @@ func TestUnpackValidationData(t *testing.T) { } func TestValidationFailure_OOG(t *testing.T) { - magic := big.NewInt(0xbf45c166) - magic.Lsh(magic, 256-32) - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1), GasFeeCap: big.NewInt(1000000000), }, "out of gas") } func TestValidationFailure_no_balance(t *testing.T) { - magic := big.NewInt(0xbf45c166) - magic.Lsh(magic, 256-32) - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(magic.Bytes()), 1), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 1), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1), GasFeeCap: big.NewInt(1000000000), }, "insufficient funds for gas * price + value: address 0x1111111111222222222233333333334444444444 have 1 want 1000000000") } func TestValidationFailure_sigerror(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + 
handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "account signature error") @@ -56,7 +52,7 @@ func TestValidationFailure_sigerror(t *testing.T) { func TestValidation_ok(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "ok") @@ -69,14 +65,14 @@ func TestValidation_ok_paid(t *testing.T) { GasFeeCap: big.NewInt(1000000000), } tb := newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE) - validatePhase(tb, aatx, "ok") + handleTransaction(tb, aatx, "ok") maxCost := new(big.Int).SetUint64(aatx.ValidationGas + aatx.PaymasterGas + aatx.Gas) maxCost.Mul(maxCost, aatx.GasFeeCap) } func TestValidationFailure_account_revert(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), @@ -84,7 +80,7 @@ func TestValidationFailure_account_revert(t *testing.T) { } func TestValidationFailure_account_out_of_range(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), @@ -92,7 +88,7 @@ func 
TestValidationFailure_account_out_of_range(t *testing.T) { } func TestValidationFailure_account_wrong_return_length(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), @@ -100,7 +96,7 @@ func TestValidationFailure_account_wrong_return_length(t *testing.T) { } func TestValidationFailure_account_no_return_value(t *testing.T) { - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), @@ -109,7 +105,7 @@ func TestValidationFailure_account_no_return_value(t *testing.T) { func TestValidationFailure_account_wrong_return_value(t *testing.T) { // create buffer of 32 byte array - validatePhase(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(make([]byte, 32)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), @@ -117,7 +113,7 @@ func TestValidationFailure_account_wrong_return_value(t *testing.T) { }, "account did not return correct MAGIC_VALUE") } -func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionTx, expectedErr string) *core.ValidationPhaseResult { +func handleTransaction(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionTx, expectedErr string) { t := tb.build() if aatx.Sender == nil { //pre-deployed sender account @@ -130,19 +126,13 @@ func validatePhase(tb *testContextBuilder, aatx types.Rip7560AccountAbstractionT defer state.Close() state.StateDB.SetTxContext(tx.Hash(), 0) - err := core.BuyGasRip7560Transaction(&aatx, state.StateDB) + _, _, _, err 
:= core.HandleRip7560Transactions([]*types.Transaction{tx}, 0, state.StateDB, &common.Address{}, t.genesisBlock.Header(), t.gaspool, t.genesis.Config, t.chainContext, vm.Config{}) - var res *core.ValidationPhaseResult - if err == nil { - res, err = core.ApplyRip7560ValidationPhases(t.genesis.Config, t.chainContext, &common.Address{}, t.gaspool, state.StateDB, t.genesisBlock.Header(), tx, vm.Config{}) - // err string or empty if nil - } errStr := "ok" if err != nil { errStr = err.Error() } assert.Equal(t.t, expectedErr, errStr) - return res } //test failure on non-rip7560 From 702d157aef8a766def35b1f29fee3165849ed5d8 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 12:56:48 +0300 Subject: [PATCH 24/73] test nonce --- core/state_processor_rip7560.go | 28 ++++++++++++++++++++++++++-- core/types/tx_rip7560.go | 7 ++++--- tests/rip7560/validation_test.go | 13 +++++++++++-- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 23bced3307b7..27eb35a2ef33 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -88,11 +88,16 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta aatx := tx.Rip7560TransactionData() statedb.SetTxContext(tx.Hash(), index+i) - err := BuyGasRip7560Transaction(aatx, statedb) - var vpr *ValidationPhaseResult + err := CheckNonceRip7560(aatx, statedb) if err != nil { return nil, nil, nil, err } + err = BuyGasRip7560Transaction(aatx, statedb) + if err != nil { + return nil, nil, nil, err + } + + var vpr *ValidationPhaseResult vpr, err = ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) if err != nil { return nil, nil, nil, err @@ -142,6 +147,24 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St return nil } +// precheck nonce of transaction. 
+// (standard preCheck function check both nonce and no-code of account) +func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) error { + // Make sure this transaction's nonce is correct. + stNonce := st.GetNonce(*tx.Sender) + if msgNonce := tx.Nonce; stNonce < msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, + tx.Sender.Hex(), msgNonce, stNonce) + } else if stNonce > msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, + tx.Sender.Hex(), msgNonce, stNonce) + } else if stNonce+1 < stNonce { + return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, + tx.Sender.Hex(), stNonce) + } + return nil +} + func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { blockContext := NewEVMBlockContext(header, bc, author) txContext := vm.TxContext{ @@ -333,6 +356,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par return &Message{ From: chainConfig.EntryPointAddress, To: tx.Sender, + Nonce: tx.Nonce, Value: big.NewInt(0), GasLimit: tx.ValidationGas - deploymentUsedGas, GasPrice: tx.GasFeeCap, diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 96855be3a45d..76443ac0785b 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -55,9 +55,10 @@ type Rip7560AccountAbstractionTx struct { // copy creates a deep copy of the transaction data and initializes all fields. func (tx *Rip7560AccountAbstractionTx) copy() TxData { cpy := &Rip7560AccountAbstractionTx{ - To: copyAddressPtr(tx.To), - Data: common.CopyBytes(tx.Data), - Gas: tx.Gas, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Nonce: tx.Nonce, + Gas: tx.Gas, // These are copied below. 
AccessList: make(AccessList, len(tx.AccessList)), Value: new(big.Int), diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index cddb45a7fc55..cd44610b1c32 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -16,8 +16,9 @@ import ( func TestPackValidationData(t *testing.T) { //assert.Equal(t, make([]byte, 32), packValidationData(0, 0, 0)) //assert.Equal(t, new(big.Int).SetInt64(0x1234).Text(16), new(big.Int).SetBytes(packValidationData(0x1234, 0, 0)).Text(16)) - // ------------------------------------ bbbbbbbbbbbb-aaaaaaaaaaa-mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - packed, _ := new(big.Int).SetString("0000000000020000000000010000000000000000000000000000000000001234", 16) + // --------------- after 6bytes before 6 bytes magic 20 bytes + validationData := "000000000002" + "000000000001" + "0000000000000000000000000000000000001234" + packed, _ := new(big.Int).SetString(validationData, 16) assert.Equal(t, packed.Text(16), new(big.Int).SetBytes(core.PackValidationData(0x1234, 1, 2)).Text(16)) } @@ -71,6 +72,14 @@ func TestValidation_ok_paid(t *testing.T) { maxCost.Mul(maxCost, aatx.GasFeeCap) } +func TestValidationFailure_account_nonce(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + Nonce: 1234, + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "nonce too high: address 0x1111111111222222222233333333334444444444, tx: 1234 state: 0") +} + func TestValidationFailure_account_revert(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ From 49bc2a85701ec4c4beab2abae3129e00c12c0f16 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 14:23:48 +0300 Subject: [PATCH 25/73] added time-range tests --- tests/rip7560/rip7560TestUtils.go | 1 
+ tests/rip7560/validation_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 710fc51897f6..1e50d9b7fbff 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -46,6 +46,7 @@ func newTestContextBuilder(t *testing.T) *testContextBuilder { func (tb *testContextBuilder) build() *testContext { genesis := core.DeveloperGenesisBlock(10_000_000, &common.Address{}) + genesis.Timestamp = 100 genesisBlock := genesis.ToBlock() gaspool := new(core.GasPool).AddGas(genesisBlock.GasLimit()) diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index cd44610b1c32..7ab87ddee589 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -51,6 +51,24 @@ func TestValidationFailure_sigerror(t *testing.T) { }, "account signature error") } +func TestValidationFailure_validAfter(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "RIP-7560 transaction validity not reached yet") +} + +func TestValidationFailure_validUntil(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, + returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + }, "RIP-7560 transaction validity expired") +} + func TestValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ From d5c3069d710129ede8f743eddc65333193fe1581 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sat, 6 Jul 2024 15:12:44 +0300 Subject: 
[PATCH 26/73] pr issues --- core/state_processor_rip7560.go | 1 - tests/rip7560/validation_test.go | 2 -- 2 files changed, 3 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 27eb35a2ef33..208e4609de5b 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -356,7 +356,6 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par return &Message{ From: chainConfig.EntryPointAddress, To: tx.Sender, - Nonce: tx.Nonce, Value: big.NewInt(0), GasLimit: tx.ValidationGas - deploymentUsedGas, GasPrice: tx.GasFeeCap, diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 7ab87ddee589..cdbd5d3c2463 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -14,8 +14,6 @@ import ( ) func TestPackValidationData(t *testing.T) { - //assert.Equal(t, make([]byte, 32), packValidationData(0, 0, 0)) - //assert.Equal(t, new(big.Int).SetInt64(0x1234).Text(16), new(big.Int).SetBytes(packValidationData(0x1234, 0, 0)).Text(16)) // --------------- after 6bytes before 6 bytes magic 20 bytes validationData := "000000000002" + "000000000001" + "0000000000000000000000000000000000001234" packed, _ := new(big.Int).SetString(validationData, 16) From 6e2e44f32b8196f5e5eba66e4d45f5ff7c9c74ba Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 16:46:06 +0300 Subject: [PATCH 27/73] initial paymaster test flows --- core/state_processor_rip7560.go | 32 ++++++++++++---- tests/rip7560/paymaster_test.go | 67 +++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 8 deletions(-) create mode 100644 tests/rip7560/paymaster_test.go diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 208e4609de5b..0ceb83448e7e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -37,9 +37,16 @@ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validU } 
func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte) { + if len(paymasterValidationReturn) < 96 { + return nil, nil + } validationData = paymasterValidationReturn[0:32] //2nd bytes32 is ignored (its an offset value) contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) + if uint64(len(paymasterValidationReturn)) < 96+contextLen.Uint64() { + return nil, nil + } + context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] return } @@ -133,17 +140,17 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St mgval = mgval.Mul(mgval, gasFeeCap) balanceCheck := new(uint256.Int).Set(mgval) - chargeFrom := *st.Sender + chargeFrom := st.Sender - if len(st.PaymasterData) >= 20 { - chargeFrom = [20]byte(st.PaymasterData[:20]) + if st.Paymaster != nil { + chargeFrom = st.Paymaster } - if have, want := state.GetBalance(chargeFrom), balanceCheck; have.Cmp(want) < 0 { + if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } - state.SubBalance(chargeFrom, mgval, 0) + state.SubBalance(*chargeFrom, mgval, 0) return nil } @@ -210,6 +217,9 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte } paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + if err != nil { + return nil, err + } vpr := &ValidationPhaseResult{ Tx: tx, TxHash: tx.Hash(), @@ -241,6 +251,9 @@ func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.Ch if err != nil { return nil, 0, 0, 0, err } + if resultPm.Failed() { + return nil, 0, 0, 0, resultPm.Err + } statedb.IntermediateRoot(true) if resultPm.Failed() { return nil, 0, 0, 0, errors.New("paymaster validation failed - invalid transaction") @@ -370,10 +383,10 @@ func 
prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { tx := baseTx.Rip7560TransactionData() - if len(tx.PaymasterData) < 20 { + paymasterAddress := tx.Paymaster + if paymasterAddress == nil { return nil, nil } - var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) jsondata := `[ {"type":"function","name":"validatePaymasterTransaction","inputs": [{"name": "version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} ]` @@ -387,7 +400,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params } return &Message{ From: config.EntryPointAddress, - To: &paymasterAddress, + To: paymasterAddress, Value: big.NewInt(0), GasLimit: tx.PaymasterGas, GasPrice: tx.GasFeeCap, @@ -470,6 +483,9 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid return nil, 0, 0, errors.New("invalid paymaster return data length") } validationData, context := UnpackPaymasterValidationReturn(data) + if validationData == nil { + return nil, 0, 0, errors.New("invalid paymaster return data") + } magicExpected, validAfter, validUntil := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go new file mode 100644 index 000000000000..bb2cd6c67018 --- /dev/null +++ b/tests/rip7560/paymaster_test.go @@ -0,0 +1,67 @@ +package rip7560 + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "math/big" + "slices" + "testing" +) + +var DEFAULT_PAYMASTER = common.HexToAddress("0xaaaaaaaaaabbbbbbbbbbccccccccccdddddddddd") + +func 
TestPaymasterValidationFailure_nobalance(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), 1), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "insufficient funds for gas * price + value: address 0xaaAaaAAAAAbBbbbbBbBBCCCCcCCCcCdddDDDdddd have 1 want 1000000000000000000") +} + +func TestPaymasterValidationFailure_oog(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "out of gas") +} +func TestPaymasterValidationFailure_revert(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + PaymasterGas: 1000000000, + }, "execution reverted") +} + +func asBytes32(a int) []byte { + return common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) +} +func paymasterReturnValue(magic, validAfter, validUntil uint64, context []byte) []byte { + validationData := core.PackValidationData(magic, validUntil, validAfter) + //manual encode (bytes32 validationData, bytes context) + return slices.Concat( + common.LeftPadBytes(validationData, 32), + asBytes32(64), + asBytes32(len(context)), + context) +} + +func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "invalid paymaster return data") +} From 6b7385d6a7a5b7323e44551b33aebb9622f4e732 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 14:17:25 +0300 Subject: [PATCH 28/73] returnWithData, revertWithData arbitrary-length return or revert (currently, can't test it, since we check "error", and actual revert data is hidden. will be probably needed when testing paymaster.) 
--- tests/rip7560/rip7560TestUtils.go | 38 +++++++++++++++++++++++-------- tests/rip7560/validation_test.go | 15 +++++++----- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 1e50d9b7fbff..a60b4730663f 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -83,20 +83,38 @@ func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) return tt } -// generate the code to return the given byte array (up to 32 bytes) -func returnData(data []byte) []byte { - datalen := len(data) - if datalen > 32 { - panic(fmt.Errorf("data length is too big %v", data)) +// create code to copy data into memory at the given offset +// NOTE: if data is not in 32-byte multiples, it will override the next bytes +// used by RETURN/REVERT +func copyToMemory(data []byte, offset uint) []byte { + ret := []byte{} + for len(data) > 32 { + ret = append(ret, createCode(vm.PUSH32, data[0:32], vm.PUSH2, uint16(offset), vm.MSTORE)...) + data = data[32:] + offset = offset + 32 + } + + if len(data) > 0 { + PUSHn := byte(int(vm.PUSH0) + len(data)) + ret = append(ret, createCode(PUSHn, data, vm.PUSH2, uint16(offset), vm.MSTORE)...) } + return ret +} - PUSHn := byte(int(vm.PUSH0) + datalen) - ret := createCode(PUSHn, data, vm.PUSH0, vm.MSTORE, vm.PUSH1, datalen, vm.PUSH1, 0, vm.RETURN) +// revert with given data +func revertWithData(data []byte) []byte { + ret := append(copyToMemory(data, 0), createCode(vm.PUSH2, uint16(len(data)), vm.PUSH0, vm.REVERT)...) + return ret +} + +// generate the code to return the given byte array (up to 32 bytes) +func returnWithData(data []byte) []byte { + ret := append(copyToMemory(data, 0), createCode(vm.PUSH2, uint16(len(data)), vm.PUSH0, vm.RETURN)...) 
return ret } func createAccountCode() []byte { - return returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) + return returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) } // create EVM code from OpCode, byte and []bytes @@ -115,9 +133,11 @@ func createCode(items ...interface{}) []byte { buffer.Write(v) case int8: buffer.WriteByte(byte(v)) + case uint16: + buffer.Write([]byte{byte(v >> 8), byte(v)}) case int: if v >= 256 { - panic(fmt.Errorf("int defaults to int8 (byte). int16, etc: %v", v)) + panic(fmt.Errorf("int defaults to int8 (byte). use int16, etc: %v", v)) } buffer.WriteByte(byte(v)) default: diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index cdbd5d3c2463..1afec6c7f01e 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -5,6 +5,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/tests" + "github.com/status-im/keycard-go/hexutils" "github.com/stretchr/testify/assert" "math/big" "testing" @@ -43,7 +44,7 @@ func TestValidationFailure_no_balance(t *testing.T) { } func TestValidationFailure_sigerror(t *testing.T) { - handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "account signature error") @@ -104,9 +105,11 @@ func TestValidationFailure_account_revert(t *testing.T) { }, "execution reverted") } -func TestValidationFailure_account_out_of_range(t *testing.T) { +func TestValidationFailure_account_revert_with_reason(t *testing.T) { + // cast abi-encode 'Error(string)' hello + 
reason := hexutils.HexToBytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000568656c6c6f000000000000000000000000000000000000000000000000000000") handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + revertWithData(reason), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "execution reverted") @@ -114,7 +117,7 @@ func TestValidationFailure_account_out_of_range(t *testing.T) { func TestValidationFailure_account_wrong_return_length(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") @@ -122,7 +125,7 @@ func TestValidationFailure_account_wrong_return_length(t *testing.T) { func TestValidationFailure_account_no_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") @@ -131,7 +134,7 @@ func TestValidationFailure_account_no_return_value(t *testing.T) { func TestValidationFailure_account_wrong_return_value(t *testing.T) { // create buffer of 32 byte array handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(make([]byte, 32)), + returnWithData(make([]byte, 32)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: 
big.NewInt(1000000000), From b0b490f350fa95920bbdfc389f6bea7c2f6a2681 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 20:49:00 +0300 Subject: [PATCH 29/73] test paymaster validations --- core/state_processor_rip7560.go | 7 ++-- tests/rip7560/paymaster_test.go | 56 +++++++++++++++++++++++-------- tests/rip7560/rip7560TestUtils.go | 34 +++++++++++++++++++ tests/rip7560/validation_test.go | 4 +-- 4 files changed, 83 insertions(+), 18 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 0ceb83448e7e..aa815ad76d3c 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -447,10 +447,10 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC if err != nil { return nil, err } - var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) + var paymasterAddress = tx.Paymaster return &Message{ From: chainConfig.EntryPointAddress, - To: &paymasterAddress, + To: paymasterAddress, Value: big.NewInt(0), GasLimit: tx.PaymasterGas - executionResult.UsedGas, GasPrice: tx.GasFeeCap, @@ -490,6 +490,9 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } + if len(context) > PAYMASTER_MAX_CONTEXT_SIZE { + return nil, 0, 0, errors.New("paymaster context too large") + } return context, validAfter, validUntil, nil } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index bb2cd6c67018..4d9a37ce4e31 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -6,7 +6,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "math/big" - "slices" "testing" ) @@ -42,19 +41,6 @@ func TestPaymasterValidationFailure_revert(t *testing.T) { }, "execution reverted") } -func asBytes32(a int) []byte { - return 
common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) -} -func paymasterReturnValue(magic, validAfter, validUntil uint64, context []byte) []byte { - validationData := core.PackValidationData(magic, validUntil, validAfter) - //manual encode (bytes32 validationData, bytes context) - return slices.Concat( - common.LeftPadBytes(validationData, 32), - asBytes32(64), - asBytes32(len(context)), - context) -} - func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). @@ -65,3 +51,45 @@ func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { Paymaster: &DEFAULT_PAYMASTER, }, "invalid paymaster return data") } + +func TestPaymasterValidationFailure_wrong_magic(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(1, 2, 3, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "paymaster did not return correct MAGIC_VALUE") +} + +func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { + //paymaster returning huge context. + // first word is magic return value + // 2nd word is offset (fixed 64) + // 3rd word is length of context (max+1) + // then we return the total length of above (context itself is uninitialized string of max+1 zeroes) + pmCode := createCode( + //vm.PUSH1, 1, vm.PUSH0, vm.RETURN, + copyToMemory(core.PackValidationData(core.MAGIC_VALUE_PAYMASTER, 0, 0), 0), + copyToMemory(asBytes32(64), 32), + copyToMemory(asBytes32(core.PAYMASTER_MAX_CONTEXT_SIZE+1), 64), + push(core.PAYMASTER_MAX_CONTEXT_SIZE+96+1), vm.PUSH0, vm.RETURN) + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), pmCode, DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "paymaster context too large") +} + +func TestPaymasterValidation_ok(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "ok") +} diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index a60b4730663f..82c46650dd95 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/status-im/keycard-go/hexutils" "math/big" + "slices" "testing" ) @@ -83,6 +84,23 @@ func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) return tt } +// generate a push opcode and its following constant value +func push(n int) []byte { + if n < 0 { + panic("attempt to push negative") + } + if n < 256 { + return createCode(vm.PUSH1, byte(n)) + } + if n < 65536 { + return createCode(vm.PUSH2, byte(n>>8), byte(n)) + } + if n < 1<<32 { + return createCode(vm.PUSH4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + } + panic("larger number") +} + // create code to copy data into memory at the given offset // NOTE: if data is not in 32-byte multiples, it will override the next bytes // used by RETURN/REVERT @@ -135,6 +153,8 @@ func createCode(items ...interface{}) []byte { buffer.WriteByte(byte(v)) case uint16: buffer.Write([]byte{byte(v >> 8), byte(v)}) + case uint32: + buffer.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}) 
case int: if v >= 256 { panic(fmt.Errorf("int defaults to int8 (byte). use int16, etc: %v", v)) @@ -148,3 +168,17 @@ func createCode(items ...interface{}) []byte { return buffer.Bytes() } + +func asBytes32(a int) []byte { + return common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) +} + +func paymasterReturnValue(magic, validUntil, validAfter uint64, context []byte) []byte { + validationData := core.PackValidationData(magic, validUntil, validAfter) + //manual encode (bytes32 validationData, bytes context) + return slices.Concat( + common.LeftPadBytes(validationData, 32), + asBytes32(64), + asBytes32(len(context)), + context) +} diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 1afec6c7f01e..c7a75fdc9127 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -53,7 +53,7 @@ func TestValidationFailure_sigerror(t *testing.T) { func TestValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity not reached yet") @@ -62,7 +62,7 @@ func TestValidationFailure_validAfter(t *testing.T) { func TestValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity expired") From 
603d385c056ec82e7af293472184651f0e7b0a19 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 22:11:30 +0300 Subject: [PATCH 30/73] paymaster time-range checking --- core/state_processor_rip7560.go | 2 +- tests/rip7560/paymaster_test.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index aa815ad76d3c..9e0333ed4d27 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -486,7 +486,7 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if validationData == nil { return nil, 0, 0, errors.New("invalid paymaster return data") } - magicExpected, validAfter, validUntil := UnpackValidationData(validationData) + magicExpected, validUntil, validAfter := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index 4d9a37ce4e31..e6a026d27676 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -84,6 +84,26 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { }, "paymaster context too large") } +func TestPaymasterValidationFailure_validAfter(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 300, 200, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "RIP-7560 transaction validity not reached yet") +} + +func TestPaymasterValidationFailure_validUntil(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 1, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "RIP-7560 transaction validity expired") +} + func TestPaymasterValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ From fcf91d4525753aff970e23350dfbc976e6ab8e2a Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sat, 6 Jul 2024 15:49:03 +0300 Subject: [PATCH 31/73] pr reviews --- core/state_processor_rip7560.go | 26 ++++++++++++-------------- tests/rip7560/paymaster_test.go | 4 ++-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 9e0333ed4d27..599ee35682ab 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -36,15 +36,18 @@ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validU return } -func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte) { +func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte, err error) { if len(paymasterValidationReturn) < 96 { - return nil, nil + return nil, nil, errors.New("paymaster return data: too short") } validationData = paymasterValidationReturn[0:32] //2nd bytes32 is ignored (its an offset value) contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) if uint64(len(paymasterValidationReturn)) < 96+contextLen.Uint64() { - return nil, nil + return nil, nil, errors.New("paymaster return data: unable to decode context") + } + if 
contextLen.Cmp(big.NewInt(PAYMASTER_MAX_CONTEXT_SIZE)) > 0 { + return nil, nil, errors.New("paymaster return data: context too large") } context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] @@ -383,8 +386,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { tx := baseTx.Rip7560TransactionData() - paymasterAddress := tx.Paymaster - if paymasterAddress == nil { + if tx.Paymaster == nil { return nil, nil } jsondata := `[ @@ -400,7 +402,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params } return &Message{ From: config.EntryPointAddress, - To: paymasterAddress, + To: tx.Paymaster, Value: big.NewInt(0), GasLimit: tx.PaymasterGas, GasPrice: tx.GasFeeCap, @@ -447,10 +449,9 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC if err != nil { return nil, err } - var paymasterAddress = tx.Paymaster return &Message{ From: chainConfig.EntryPointAddress, - To: paymasterAddress, + To: tx.Paymaster, Value: big.NewInt(0), GasLimit: tx.PaymasterGas - executionResult.UsedGas, GasPrice: tx.GasFeeCap, @@ -482,17 +483,14 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if len(data) < 32 { return nil, 0, 0, errors.New("invalid paymaster return data length") } - validationData, context := UnpackPaymasterValidationReturn(data) - if validationData == nil { - return nil, 0, 0, errors.New("invalid paymaster return data") + validationData, context, err := UnpackPaymasterValidationReturn(data) + if err != nil { + return nil, 0, 0, err } magicExpected, validUntil, validAfter := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } - if len(context) > PAYMASTER_MAX_CONTEXT_SIZE { - return nil, 0, 0, 
errors.New("paymaster context too large") - } return context, validAfter, validUntil, nil } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index e6a026d27676..b169e0be239c 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -49,7 +49,7 @@ func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { PaymasterGas: 1000000000, GasFeeCap: big.NewInt(1000000000), Paymaster: &DEFAULT_PAYMASTER, - }, "invalid paymaster return data") + }, "paymaster return data: too short") } func TestPaymasterValidationFailure_wrong_magic(t *testing.T) { @@ -81,7 +81,7 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { PaymasterGas: 1000000000, GasFeeCap: big.NewInt(1000000000), Paymaster: &DEFAULT_PAYMASTER, - }, "paymaster context too large") + }, "paymaster return data: context too large") } func TestPaymasterValidationFailure_validAfter(t *testing.T) { From 65daa36d1faf12a4cd79be2e5eb770121a25b442 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Wed, 3 Jul 2024 18:50:20 +0300 Subject: [PATCH 32/73] test deployer flows --- core/state_processor_rip7560.go | 33 +++++++----- tests/rip7560/deployer_test.go | 88 +++++++++++++++++++++++++++++++ tests/rip7560/rip7560TestUtils.go | 36 +++++++++++-- 3 files changed, 142 insertions(+), 15 deletions(-) create mode 100644 tests/rip7560/deployer_test.go diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 599ee35682ab..13d4558ba231 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -177,8 +177,9 @@ func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { blockContext := NewEVMBlockContext(header, bc, author) + sender := 
tx.Rip7560TransactionData().Sender txContext := vm.TxContext{ - Origin: *tx.Rip7560TransactionData().Sender, + Origin: *sender, GasPrice: tx.GasFeeCap(), } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) @@ -186,16 +187,25 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 if deployerMsg != nil { - resultDeployer, err := ApplyMessage(evm, deployerMsg, gp) - if err != nil { - return nil, err + var err error + var resultDeployer *ExecutionResult + if statedb.GetCodeSize(*sender) != 0 { + err = errors.New("sender already deployed") + } else { + resultDeployer, err = ApplyMessage(evm, deployerMsg, gp) } - statedb.IntermediateRoot(true) - if resultDeployer.Failed() { + if err == nil && resultDeployer != nil { + err = resultDeployer.Err + deploymentUsedGas = resultDeployer.UsedGas + } + if err == nil && statedb.GetCodeSize(*sender) == 0 { + err = errors.New("sender not deployed") + } + if err != nil { // TODO: bubble up the inner error message to the user, if possible - return nil, errors.New("account deployment failed - invalid transaction") + return nil, fmt.Errorf("account deployment failed: %v", err) } - deploymentUsedGas = resultDeployer.UsedGas + statedb.IntermediateRoot(true) } /*** Account Validation Frame ***/ @@ -338,19 +348,18 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { tx := baseTx.Rip7560TransactionData() - if len(tx.DeployerData) < 20 { + if tx.Deployer == nil { return nil } - var deployerAddress common.Address = [20]byte(tx.DeployerData[0:20]) return &Message{ From: config.DeployerCallerAddress, - To: &deployerAddress, + To: tx.Deployer, Value: big.NewInt(0), GasLimit: tx.ValidationGas, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, - Data: tx.DeployerData[20:], 
+ Data: tx.DeployerData, AccessList: make(types.AccessList, 0), SkipAccountChecks: true, IsRip7560Frame: true, diff --git a/tests/rip7560/deployer_test.go b/tests/rip7560/deployer_test.go new file mode 100644 index 000000000000..e07ea49ef14b --- /dev/null +++ b/tests/rip7560/deployer_test.go @@ -0,0 +1,88 @@ +package rip7560 + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "math/big" + "testing" +) + +var DEPLOYER = common.HexToAddress("0xddddddddddeeeeeeeeeeddddddddddeeeeeeeeee") + +func TestValidationFailure_deployerRevert(t *testing.T) { + handleTransaction(newTestContextBuilder(t). + withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). + withCode(DEPLOYER.Hex(), revertWithData([]byte{}), 0), + types.Rip7560AccountAbstractionTx{ + Deployer: &DEPLOYER, + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + }, "account deployment failed: execution reverted") +} + +func TestValidationFailure_deployerOOG(t *testing.T) { + handleTransaction(newTestContextBuilder(t). + withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). + withCode(DEPLOYER.Hex(), revertWithData([]byte{}), 0), + types.Rip7560AccountAbstractionTx{ + Deployer: &DEPLOYER, + ValidationGas: 1, + GasFeeCap: big.NewInt(1000000000), + }, "account deployment failed: out of gas") +} + +func TestValidationFailure_senderNotDeployed(t *testing.T) { + handleTransaction(newTestContextBuilder(t). + withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). + withCode(DEPLOYER.Hex(), returnWithData([]byte{}), 0), + types.Rip7560AccountAbstractionTx{ + Deployer: &DEPLOYER, + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + }, "account deployment failed: sender not deployed") +} + +func TestValidationFailure_senderAlreadyDeployed(t *testing.T) { + accountCode := revertWithData([]byte{}) + deployerCode := create2(accountCode) + sender := create2_addr(DEPLOYER, accountCode) + handleTransaction(newTestContextBuilder(t). 
+ withCode(sender.Hex(), accountCode, DEFAULT_BALANCE). + withCode(DEPLOYER.Hex(), deployerCode, 0), + types.Rip7560AccountAbstractionTx{ + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + }, "account deployment failed: sender already deployed") +} + +func TestValidationFailure_senderReverts(t *testing.T) { + accountCode := revertWithData([]byte{}) + deployerCode := createCode(create2(accountCode), returnWithData([]byte{})) + sender := create2_addr(DEPLOYER, accountCode) + handleTransaction(newTestContextBuilder(t). + withCode(sender.Hex(), []byte{}, DEFAULT_BALANCE). + withCode(DEPLOYER.Hex(), deployerCode, 0), + types.Rip7560AccountAbstractionTx{ + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + }, "execution reverted") +} + +func TestValidation_deployer_ok(t *testing.T) { + accountCode := createAccountCode() + deployerCode := createCode(create2(accountCode), returnWithData([]byte{})) + sender := create2_addr(DEPLOYER, accountCode) + handleTransaction(newTestContextBuilder(t). + withCode(sender.Hex(), []byte{}, DEFAULT_BALANCE). 
+ withCode(DEPLOYER.Hex(), deployerCode, 0), + types.Rip7560AccountAbstractionTx{ + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + }, "ok") +} diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 82c46650dd95..5ebf610d002b 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/status-im/keycard-go/hexutils" "math/big" @@ -45,6 +46,31 @@ func newTestContextBuilder(t *testing.T) *testContextBuilder { } } +// return a contract code that will deploy the given code +func create2_contract(deployedCode []byte) []byte { + + return returnWithData(deployedCode) +} + +// return the generated address when deploying the given code +func create2_addr(deployer common.Address, deployedCode []byte) common.Address { + + contractCode := create2_contract(deployedCode) + data := createCode(0xff, deployer.Bytes(), common.Hash{}, crypto.Keccak256(contractCode)) + return common.BytesToAddress(crypto.Keccak256(data)) +} + +// generate code to call create2 +// note: parameter is the deployed code, not the full contract code +// always use zero value and zero salt. 
+func create2(deployedCode []byte) []byte { + contractCode := create2_contract(deployedCode) + return createCode( + copyToMemory(contractCode, 0), + push(0), push(len(contractCode)), push(0), push(0), vm.CREATE2, + ) +} + func (tb *testContextBuilder) build() *testContext { genesis := core.DeveloperGenesisBlock(10_000_000, &common.Address{}) genesis.Timestamp = 100 @@ -69,7 +95,6 @@ func (tt *testContextBuilder) withAccount(addr string, balance int64) *testConte tt.genesisAlloc[common.HexToAddress(addr)] = types.Account{Balance: big.NewInt(balance)} return tt } - func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) *testContextBuilder { if len(code) == 0 { tt.genesisAlloc[common.HexToAddress(addr)] = types.Account{ @@ -113,8 +138,9 @@ func copyToMemory(data []byte, offset uint) []byte { } if len(data) > 0 { - PUSHn := byte(int(vm.PUSH0) + len(data)) - ret = append(ret, createCode(PUSHn, data, vm.PUSH2, uint16(offset), vm.MSTORE)...) + //push data up, as EVM is big-endian + v := common.RightPadBytes(data, 32) + ret = append(ret, createCode(vm.PUSH32, v, vm.PUSH2, uint16(offset), vm.MSTORE)...) } return ret } @@ -160,6 +186,10 @@ func createCode(items ...interface{}) []byte { panic(fmt.Errorf("int defaults to int8 (byte). use int16, etc: %v", v)) } buffer.WriteByte(byte(v)) + case common.Hash: + buffer.Write(v.Bytes()) + case common.Address: + buffer.Write(v.Bytes()) default: // should be a compile-time error... 
panic(fmt.Errorf("unsupported type: %T", v)) From 95a378640e21f89ac08413dc89415d183cc4d537 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 16:46:06 +0300 Subject: [PATCH 33/73] initial paymaster test flows --- core/state_processor_rip7560.go | 32 ++++++++++++---- tests/rip7560/paymaster_test.go | 67 +++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 8 deletions(-) create mode 100644 tests/rip7560/paymaster_test.go diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 208e4609de5b..0ceb83448e7e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -37,9 +37,16 @@ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validU } func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte) { + if len(paymasterValidationReturn) < 96 { + return nil, nil + } validationData = paymasterValidationReturn[0:32] //2nd bytes32 is ignored (its an offset value) contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) + if uint64(len(paymasterValidationReturn)) < 96+contextLen.Uint64() { + return nil, nil + } + context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] return } @@ -133,17 +140,17 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St mgval = mgval.Mul(mgval, gasFeeCap) balanceCheck := new(uint256.Int).Set(mgval) - chargeFrom := *st.Sender + chargeFrom := st.Sender - if len(st.PaymasterData) >= 20 { - chargeFrom = [20]byte(st.PaymasterData[:20]) + if st.Paymaster != nil { + chargeFrom = st.Paymaster } - if have, want := state.GetBalance(chargeFrom), balanceCheck; have.Cmp(want) < 0 { + if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } - state.SubBalance(chargeFrom, mgval, 0) + state.SubBalance(*chargeFrom, mgval, 0) return nil 
} @@ -210,6 +217,9 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte } paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + if err != nil { + return nil, err + } vpr := &ValidationPhaseResult{ Tx: tx, TxHash: tx.Hash(), @@ -241,6 +251,9 @@ func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.Ch if err != nil { return nil, 0, 0, 0, err } + if resultPm.Failed() { + return nil, 0, 0, 0, resultPm.Err + } statedb.IntermediateRoot(true) if resultPm.Failed() { return nil, 0, 0, 0, errors.New("paymaster validation failed - invalid transaction") @@ -370,10 +383,10 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { tx := baseTx.Rip7560TransactionData() - if len(tx.PaymasterData) < 20 { + paymasterAddress := tx.Paymaster + if paymasterAddress == nil { return nil, nil } - var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) jsondata := `[ {"type":"function","name":"validatePaymasterTransaction","inputs": [{"name": "version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} ]` @@ -387,7 +400,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params } return &Message{ From: config.EntryPointAddress, - To: &paymasterAddress, + To: paymasterAddress, Value: big.NewInt(0), GasLimit: tx.PaymasterGas, GasPrice: tx.GasFeeCap, @@ -470,6 +483,9 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid return nil, 0, 0, errors.New("invalid paymaster return data length") } validationData, context := UnpackPaymasterValidationReturn(data) + if validationData == nil { + return nil, 0, 0, errors.New("invalid paymaster return data") + } 
magicExpected, validAfter, validUntil := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go new file mode 100644 index 000000000000..bb2cd6c67018 --- /dev/null +++ b/tests/rip7560/paymaster_test.go @@ -0,0 +1,67 @@ +package rip7560 + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "math/big" + "slices" + "testing" +) + +var DEFAULT_PAYMASTER = common.HexToAddress("0xaaaaaaaaaabbbbbbbbbbccccccccccdddddddddd") + +func TestPaymasterValidationFailure_nobalance(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), 1), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "insufficient funds for gas * price + value: address 0xaaAaaAAAAAbBbbbbBbBBCCCCcCCCcCdddDDDdddd have 1 want 1000000000000000000") +} + +func TestPaymasterValidationFailure_oog(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "out of gas") +} +func TestPaymasterValidationFailure_revert(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + PaymasterGas: 1000000000, + }, "execution reverted") +} + +func asBytes32(a int) []byte { + return common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) +} +func paymasterReturnValue(magic, validAfter, validUntil uint64, context []byte) []byte { + validationData := core.PackValidationData(magic, validUntil, validAfter) + //manual encode (bytes32 validationData, bytes context) + return slices.Concat( + common.LeftPadBytes(validationData, 32), + asBytes32(64), + asBytes32(len(context)), + context) +} + +func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "invalid paymaster return data") +} From 7331b96c2719023313f19e131d249b65149c76d4 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 14:17:25 +0300 Subject: [PATCH 34/73] returnWithData, revertWithData arbitrary-length return or revert (currently, can't test it, since we check "error", and actual revert data is hidden. will be probably needed when testing paymaster.) 
--- tests/rip7560/rip7560TestUtils.go | 38 +++++++++++++++++++++++-------- tests/rip7560/validation_test.go | 15 +++++++----- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 1e50d9b7fbff..a60b4730663f 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -83,20 +83,38 @@ func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) return tt } -// generate the code to return the given byte array (up to 32 bytes) -func returnData(data []byte) []byte { - datalen := len(data) - if datalen > 32 { - panic(fmt.Errorf("data length is too big %v", data)) +// create code to copy data into memory at the given offset +// NOTE: if data is not in 32-byte multiples, it will override the next bytes +// used by RETURN/REVERT +func copyToMemory(data []byte, offset uint) []byte { + ret := []byte{} + for len(data) > 32 { + ret = append(ret, createCode(vm.PUSH32, data[0:32], vm.PUSH2, uint16(offset), vm.MSTORE)...) + data = data[32:] + offset = offset + 32 + } + + if len(data) > 0 { + PUSHn := byte(int(vm.PUSH0) + len(data)) + ret = append(ret, createCode(PUSHn, data, vm.PUSH2, uint16(offset), vm.MSTORE)...) } + return ret +} - PUSHn := byte(int(vm.PUSH0) + datalen) - ret := createCode(PUSHn, data, vm.PUSH0, vm.MSTORE, vm.PUSH1, datalen, vm.PUSH1, 0, vm.RETURN) +// revert with given data +func revertWithData(data []byte) []byte { + ret := append(copyToMemory(data, 0), createCode(vm.PUSH2, uint16(len(data)), vm.PUSH0, vm.REVERT)...) + return ret +} + +// generate the code to return the given byte array (up to 32 bytes) +func returnWithData(data []byte) []byte { + ret := append(copyToMemory(data, 0), createCode(vm.PUSH2, uint16(len(data)), vm.PUSH0, vm.RETURN)...) 
return ret } func createAccountCode() []byte { - return returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) + return returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) } // create EVM code from OpCode, byte and []bytes @@ -115,9 +133,11 @@ func createCode(items ...interface{}) []byte { buffer.Write(v) case int8: buffer.WriteByte(byte(v)) + case uint16: + buffer.Write([]byte{byte(v >> 8), byte(v)}) case int: if v >= 256 { - panic(fmt.Errorf("int defaults to int8 (byte). int16, etc: %v", v)) + panic(fmt.Errorf("int defaults to int8 (byte). use int16, etc: %v", v)) } buffer.WriteByte(byte(v)) default: diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index cdbd5d3c2463..1afec6c7f01e 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -5,6 +5,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/tests" + "github.com/status-im/keycard-go/hexutils" "github.com/stretchr/testify/assert" "math/big" "testing" @@ -43,7 +44,7 @@ func TestValidationFailure_no_balance(t *testing.T) { } func TestValidationFailure_sigerror(t *testing.T) { - handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "account signature error") @@ -104,9 +105,11 @@ func TestValidationFailure_account_revert(t *testing.T) { }, "execution reverted") } -func TestValidationFailure_account_out_of_range(t *testing.T) { +func TestValidationFailure_account_revert_with_reason(t *testing.T) { + // cast abi-encode 'Error(string)' hello + 
reason := hexutils.HexToBytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000568656c6c6f000000000000000000000000000000000000000000000000000000") handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + revertWithData(reason), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "execution reverted") @@ -114,7 +117,7 @@ func TestValidationFailure_account_out_of_range(t *testing.T) { func TestValidationFailure_account_wrong_return_length(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") @@ -122,7 +125,7 @@ func TestValidationFailure_account_wrong_return_length(t *testing.T) { func TestValidationFailure_account_no_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") @@ -131,7 +134,7 @@ func TestValidationFailure_account_no_return_value(t *testing.T) { func TestValidationFailure_account_wrong_return_value(t *testing.T) { // create buffer of 32 byte array handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(make([]byte, 32)), + returnWithData(make([]byte, 32)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: 
big.NewInt(1000000000), From d804c44262f8b2f9781e71d03b1038c38f6ab5c5 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 20:49:00 +0300 Subject: [PATCH 35/73] test paymaster validations --- core/state_processor_rip7560.go | 7 ++-- tests/rip7560/paymaster_test.go | 56 +++++++++++++++++++++++-------- tests/rip7560/rip7560TestUtils.go | 34 +++++++++++++++++++ tests/rip7560/validation_test.go | 4 +-- 4 files changed, 83 insertions(+), 18 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 0ceb83448e7e..aa815ad76d3c 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -447,10 +447,10 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC if err != nil { return nil, err } - var paymasterAddress common.Address = [20]byte(tx.PaymasterData[0:20]) + var paymasterAddress = tx.Paymaster return &Message{ From: chainConfig.EntryPointAddress, - To: &paymasterAddress, + To: paymasterAddress, Value: big.NewInt(0), GasLimit: tx.PaymasterGas - executionResult.UsedGas, GasPrice: tx.GasFeeCap, @@ -490,6 +490,9 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } + if len(context) > PAYMASTER_MAX_CONTEXT_SIZE { + return nil, 0, 0, errors.New("paymaster context too large") + } return context, validAfter, validUntil, nil } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index bb2cd6c67018..4d9a37ce4e31 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -6,7 +6,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "math/big" - "slices" "testing" ) @@ -42,19 +41,6 @@ func TestPaymasterValidationFailure_revert(t *testing.T) { }, "execution reverted") } -func asBytes32(a int) []byte { - return 
common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) -} -func paymasterReturnValue(magic, validAfter, validUntil uint64, context []byte) []byte { - validationData := core.PackValidationData(magic, validUntil, validAfter) - //manual encode (bytes32 validationData, bytes context) - return slices.Concat( - common.LeftPadBytes(validationData, 32), - asBytes32(64), - asBytes32(len(context)), - context) -} - func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). @@ -65,3 +51,45 @@ func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { Paymaster: &DEFAULT_PAYMASTER, }, "invalid paymaster return data") } + +func TestPaymasterValidationFailure_wrong_magic(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(1, 2, 3, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "paymaster did not return correct MAGIC_VALUE") +} + +func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { + //paymaster returning huge context. + // first word is magic return value + // 2nd word is offset (fixed 64) + // 3rd word is length of context (max+1) + // then we return the total length of above (context itself is uninitialized string of max+1 zeroes) + pmCode := createCode( + //vm.PUSH1, 1, vm.PUSH0, vm.RETURN, + copyToMemory(core.PackValidationData(core.MAGIC_VALUE_PAYMASTER, 0, 0), 0), + copyToMemory(asBytes32(64), 32), + copyToMemory(asBytes32(core.PAYMASTER_MAX_CONTEXT_SIZE+1), 64), + push(core.PAYMASTER_MAX_CONTEXT_SIZE+96+1), vm.PUSH0, vm.RETURN) + + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), pmCode, DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "paymaster context too large") +} + +func TestPaymasterValidation_ok(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "ok") +} diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index a60b4730663f..82c46650dd95 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/status-im/keycard-go/hexutils" "math/big" + "slices" "testing" ) @@ -83,6 +84,23 @@ func (tt *testContextBuilder) withCode(addr string, code []byte, balance int64) return tt } +// generate a push opcode and its following constant value +func push(n int) []byte { + if n < 0 { + panic("attempt to push negative") + } + if n < 256 { + return createCode(vm.PUSH1, byte(n)) + } + if n < 65536 { + return createCode(vm.PUSH2, byte(n>>8), byte(n)) + } + if n < 1<<32 { + return createCode(vm.PUSH4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) + } + panic("larger number") +} + // create code to copy data into memory at the given offset // NOTE: if data is not in 32-byte multiples, it will override the next bytes // used by RETURN/REVERT @@ -135,6 +153,8 @@ func createCode(items ...interface{}) []byte { buffer.WriteByte(byte(v)) case uint16: buffer.Write([]byte{byte(v >> 8), byte(v)}) + case uint32: + buffer.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}) 
case int: if v >= 256 { panic(fmt.Errorf("int defaults to int8 (byte). use int16, etc: %v", v)) @@ -148,3 +168,17 @@ func createCode(items ...interface{}) []byte { return buffer.Bytes() } + +func asBytes32(a int) []byte { + return common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) +} + +func paymasterReturnValue(magic, validUntil, validAfter uint64, context []byte) []byte { + validationData := core.PackValidationData(magic, validUntil, validAfter) + //manual encode (bytes32 validationData, bytes context) + return slices.Concat( + common.LeftPadBytes(validationData, 32), + asBytes32(64), + asBytes32(len(context)), + context) +} diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 1afec6c7f01e..c7a75fdc9127 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -53,7 +53,7 @@ func TestValidationFailure_sigerror(t *testing.T) { func TestValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity not reached yet") @@ -62,7 +62,7 @@ func TestValidationFailure_validAfter(t *testing.T) { func TestValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGas: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity expired") From 
604c76341eebec28831c297e230bc65d4ac6feb1 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 1 Jul 2024 22:11:30 +0300 Subject: [PATCH 36/73] paymaster time-range checking --- core/state_processor_rip7560.go | 2 +- tests/rip7560/paymaster_test.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index aa815ad76d3c..9e0333ed4d27 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -486,7 +486,7 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if validationData == nil { return nil, 0, 0, errors.New("invalid paymaster return data") } - magicExpected, validAfter, validUntil := UnpackValidationData(validationData) + magicExpected, validUntil, validAfter := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index 4d9a37ce4e31..e6a026d27676 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -84,6 +84,26 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { }, "paymaster context too large") } +func TestPaymasterValidationFailure_validAfter(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 300, 200, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "RIP-7560 transaction validity not reached yet") +} + +func TestPaymasterValidationFailure_validUntil(t *testing.T) { + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
+ withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 1, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + ValidationGas: 1000000000, + PaymasterGas: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + }, "RIP-7560 transaction validity expired") +} + func TestPaymasterValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ From eb7127c97d97b1cbe46353e6d9ff8cb3400c7951 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sat, 6 Jul 2024 15:49:03 +0300 Subject: [PATCH 37/73] pr reviews --- core/state_processor_rip7560.go | 26 ++++++++++++-------------- tests/rip7560/paymaster_test.go | 4 ++-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 9e0333ed4d27..599ee35682ab 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -36,15 +36,18 @@ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validU return } -func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte) { +func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte, err error) { if len(paymasterValidationReturn) < 96 { - return nil, nil + return nil, nil, errors.New("paymaster return data: too short") } validationData = paymasterValidationReturn[0:32] //2nd bytes32 is ignored (its an offset value) contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) if uint64(len(paymasterValidationReturn)) < 96+contextLen.Uint64() { - return nil, nil + return nil, nil, errors.New("paymaster return data: unable to decode context") + } + if 
contextLen.Cmp(big.NewInt(PAYMASTER_MAX_CONTEXT_SIZE)) > 0 { + return nil, nil, errors.New("paymaster return data: context too large") } context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] @@ -383,8 +386,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { tx := baseTx.Rip7560TransactionData() - paymasterAddress := tx.Paymaster - if paymasterAddress == nil { + if tx.Paymaster == nil { return nil, nil } jsondata := `[ @@ -400,7 +402,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params } return &Message{ From: config.EntryPointAddress, - To: paymasterAddress, + To: tx.Paymaster, Value: big.NewInt(0), GasLimit: tx.PaymasterGas, GasPrice: tx.GasFeeCap, @@ -447,10 +449,9 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC if err != nil { return nil, err } - var paymasterAddress = tx.Paymaster return &Message{ From: chainConfig.EntryPointAddress, - To: paymasterAddress, + To: tx.Paymaster, Value: big.NewInt(0), GasLimit: tx.PaymasterGas - executionResult.UsedGas, GasPrice: tx.GasFeeCap, @@ -482,17 +483,14 @@ func validatePaymasterReturnData(data []byte) (context []byte, validAfter, valid if len(data) < 32 { return nil, 0, 0, errors.New("invalid paymaster return data length") } - validationData, context := UnpackPaymasterValidationReturn(data) - if validationData == nil { - return nil, 0, 0, errors.New("invalid paymaster return data") + validationData, context, err := UnpackPaymasterValidationReturn(data) + if err != nil { + return nil, 0, 0, err } magicExpected, validUntil, validAfter := UnpackValidationData(validationData) if magicExpected != MAGIC_VALUE_PAYMASTER { return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") } - if len(context) > PAYMASTER_MAX_CONTEXT_SIZE { - return nil, 0, 0, 
errors.New("paymaster context too large") - } return context, validAfter, validUntil, nil } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index e6a026d27676..b169e0be239c 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -49,7 +49,7 @@ func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { PaymasterGas: 1000000000, GasFeeCap: big.NewInt(1000000000), Paymaster: &DEFAULT_PAYMASTER, - }, "invalid paymaster return data") + }, "paymaster return data: too short") } func TestPaymasterValidationFailure_wrong_magic(t *testing.T) { @@ -81,7 +81,7 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { PaymasterGas: 1000000000, GasFeeCap: big.NewInt(1000000000), Paymaster: &DEFAULT_PAYMASTER, - }, "paymaster context too large") + }, "paymaster return data: context too large") } func TestPaymasterValidationFailure_validAfter(t *testing.T) { From 71c013f9d9dd70e65becadaa8c885b269a1b68f4 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 17 Jul 2024 23:10:15 +0200 Subject: [PATCH 38/73] AA-247: Implement tracer (#11) * WIP: bring in old 4337 Bundler Collector Tracer code (compiles) * Fix runtime errors (WIP) * Adding missing fields, using OnEnter hook * Fix using [0:20] bytes of "PaymasterData" instead of the new "Paymaster" field * Fix banned opcode checks for Factory frame * Add 'nil' check --------- Co-authored-by: shahafn --- core/state_processor_rip7560.go | 14 +- eth/tracers/native/rip7560_validation.go | 289 +++++++++++++++++++++-- 2 files changed, 284 insertions(+), 19 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 13d4558ba231..9dd022982669 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -145,8 +145,8 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St chargeFrom := st.Sender - if st.Paymaster != nil { - chargeFrom = st.Paymaster + if 
st.Paymaster != nil && st.Paymaster.Cmp(common.Address{}) != 0 { + chargeFrom = *st.Paymaster } if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { @@ -183,6 +183,11 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte GasPrice: tx.GasFeeCap(), } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) + + if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil { + evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, common.Address{}) + } + /*** Deployer Frame ***/ deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 @@ -202,7 +207,6 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte err = errors.New("sender not deployed") } if err != nil { - // TODO: bubble up the inner error message to the user, if possible return nil, fmt.Errorf("account deployment failed: %v", err) } statedb.IntermediateRoot(true) @@ -348,7 +352,7 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { tx := baseTx.Rip7560TransactionData() - if tx.Deployer == nil { + if tx.Deployer == nil || tx.Deployer.Cmp(common.Address{}) == 0 { return nil } return &Message{ @@ -395,7 +399,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { tx := baseTx.Rip7560TransactionData() - if tx.Paymaster == nil { + if tx.Paymaster == nil || tx.Paymaster.Cmp(common.Address{}) == 0 { return nil, nil } jsondata := `[ diff --git a/eth/tracers/native/rip7560_validation.go b/eth/tracers/native/rip7560_validation.go index 459e3c50fff0..9cdc90e2b34c 100644 --- a/eth/tracers/native/rip7560_validation.go +++ b/eth/tracers/native/rip7560_validation.go @@ -2,18 +2,54 @@ package native import ( 
"encoding/json" - "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/holiman/uint256" + "math/big" + "regexp" + "strings" ) func init() { tracers.DefaultDirectory.Register("rip7560Validation", newRip7560Tracer, false) } +/******* taken from ERC-4337 bundler collector tracer *******/ + +type partialStack = []*uint256.Int + +type lastThreeOpCodesItem struct { + Opcode string + StackTop3 partialStack +} + +type contractSizeVal struct { + ContractSize int `json:"contractSize"` + Opcode string `json:"opcode"` +} + +type access struct { + Reads map[string]string `json:"reads"` + Writes map[string]uint64 `json:"writes"` +} + +// note - this means an individual 'frame' in 7560 (validate, execute, postOp) +type entryPointCall struct { + //TopLevelMethodSig hexutil.Bytes `json:"topLevelMethodSig"` + TopLevelTargetAddress common.Address `json:"topLevelTargetAddress"` + Access map[common.Address]*access `json:"access"` + Opcodes map[string]uint64 `json:"opcodes"` + ExtCodeAccessInfo map[common.Address]string `json:"extCodeAccessInfo"` + ContractSize map[common.Address]*contractSizeVal `json:"contractSize"` + OOG bool `json:"oog"` +} + +/******* *******/ + const ValidationFramesMaxCount = 3 func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { @@ -23,14 +59,31 @@ func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trace return nil, err } } + allowedOpcodeRegex, err := regexp.Compile( + `^(DUP\d+|PUSH\d+|SWAP\d+|POP|ADD|SUB|MUL|DIV|EQ|LTE?|S?GTE?|SLT|SH[LR]|AND|OR|NOT|ISZERO)$`, + ) + if err != nil { + return nil, err + } + // TODO FIX mock fields t := &rip7560ValidationTracer{ TraceResults: make([]stateMap, ValidationFramesMaxCount), UsedOpcodes: make([]map[string]bool, 
ValidationFramesMaxCount), Created: make([]map[common.Address]bool, ValidationFramesMaxCount), - Deleted: make([]map[common.Address]bool, ValidationFramesMaxCount), + //Deleted: make([]map[common.Address]bool, ValidationFramesMaxCount), + + allowedOpcodeRegex: allowedOpcodeRegex, + lastThreeOpCodes: make([]*lastThreeOpCodesItem, 0), + CurrentLevel: nil, + lastOp: "", + Calls: make([]*callsItem, 0), + Keccak: make([]hexutil.Bytes, 0), + Logs: make([]*logsItem, 0), } + return &tracers.Tracer{ Hooks: &tracing.Hooks{ + OnEnter: t.OnEnter, OnTxStart: t.OnTxStart, OnTxEnd: t.OnTxEnd, OnOpcode: t.OnOpcode, @@ -40,38 +93,246 @@ func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trace }, nil } +type callsItem struct { + // Common + Type string `json:"type"` + + // Enter info + From common.Address `json:"from"` + To common.Address `json:"to"` + Method hexutil.Bytes `json:"method"` + Value *hexutil.Big `json:"value"` + Gas uint64 `json:"gas"` + + // Exit info + GasUsed uint64 `json:"gasUsed"` + Data hexutil.Bytes `json:"data"` +} + +type logsItem struct { + Data hexutil.Bytes `json:"data"` + Topic []hexutil.Bytes `json:"topic"` +} + // Array fields contain of all access details of all validation frames type rip7560ValidationTracer struct { + //rip7560TxData *types.Rip7560AccountAbstractionTx + env *tracing.VMContext TraceResults []stateMap `json:"traceResults"` UsedOpcodes []map[string]bool `json:"usedOpcodes"` Created []map[common.Address]bool `json:"created"` - Deleted []map[common.Address]bool `json:"deleted"` + //Deleted []map[common.Address]bool `json:"deleted"` + + lastThreeOpCodes []*lastThreeOpCodesItem + allowedOpcodeRegex *regexp.Regexp `json:"allowedOpcodeRegex,omitempty"` + CurrentLevel *entryPointCall + lastOp string + CallsFromEntryPoint []*entryPointCall `json:"callsFromEntryPoint,omitempty"` + Keccak []hexutil.Bytes `json:"keccak"` + Calls []*callsItem `json:"calls"` + Logs []*logsItem `json:"logs"` + // todo //interrupt atomic.Bool // 
Atomic flag to signal execution interruption //reason error // Textual reason for the interruption } -func (t *rip7560ValidationTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { +func (b *rip7560ValidationTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if depth == 0 { + b.createNewTopLevelFrame(to) + } +} + +func (b *rip7560ValidationTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + b.env = env + //b.rip7560TxData = tx.Rip7560TransactionData() +} + +func (b *rip7560ValidationTracer) createNewTopLevelFrame(addr common.Address) { + b.CurrentLevel = &entryPointCall{ + TopLevelTargetAddress: addr, + Access: map[common.Address]*access{}, + Opcodes: map[string]uint64{}, + ExtCodeAccessInfo: map[common.Address]string{}, + ContractSize: map[common.Address]*contractSizeVal{}, + OOG: false, + } + b.CallsFromEntryPoint = append(b.CallsFromEntryPoint, b.CurrentLevel) + b.lastOp = "" + return +} + +func (b *rip7560ValidationTracer) OnTxEnd(receipt *types.Receipt, err error) { +} + +func (b *rip7560ValidationTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + opcode := vm.OpCode(op).String() + + stackSize := len(scope.StackData()) + stackTop3 := partialStack{} + for i := 0; i < 3 && i < stackSize; i++ { + stackTop3 = append(stackTop3, StackBack(scope.StackData(), i)) + } + b.lastThreeOpCodes = append(b.lastThreeOpCodes, &lastThreeOpCodesItem{ + Opcode: opcode, + StackTop3: stackTop3, + }) + if len(b.lastThreeOpCodes) > 3 { + b.lastThreeOpCodes = b.lastThreeOpCodes[1:] + } + + if gas < cost || (opcode == "SSTORE" && gas < 2300) { + b.CurrentLevel.OOG = true + } + + if opcode == "REVERT" || opcode == "RETURN" { + // exit() is not called on top-level return/revert, so we reconstruct it from opcode + if depth == 1 { + // TODO: uncomment and fix with StackBack + 
//ofs := scope.Stack.Back(0).ToBig().Int64() + //len := scope.Stack.Back(1).ToBig().Int64() + //data := scope.Memory.GetCopy(ofs, len) + //b.Calls = append(b.Calls, &callsItem{ + // Type: opcode, + // GasUsed: 0, + // Data: data, + //}) + } + // NOTE: flushing all history after RETURN + b.lastThreeOpCodes = []*lastThreeOpCodesItem{} + } + + // not pasting the new "entryPointCall" detection here - not necessary for 7560 + + var lastOpInfo *lastThreeOpCodesItem + if len(b.lastThreeOpCodes) >= 2 { + lastOpInfo = b.lastThreeOpCodes[len(b.lastThreeOpCodes)-2] + } + // store all addresses touched by EXTCODE* opcodes + if lastOpInfo != nil && strings.HasPrefix(lastOpInfo.Opcode, "EXT") { + addr := common.HexToAddress(lastOpInfo.StackTop3[0].Hex()) + ops := []string{} + for _, item := range b.lastThreeOpCodes { + ops = append(ops, item.Opcode) + } + last3OpcodeStr := strings.Join(ops, ",") + + // only store the last EXTCODE* opcode per address - could even be a boolean for our current use-case + // [OP-051] + if !strings.Contains(last3OpcodeStr, ",EXTCODESIZE,ISZERO") { + b.CurrentLevel.ExtCodeAccessInfo[addr] = opcode + } + } + + // [OP-041] + if b.isEXTorCALL(opcode) { + n := 0 + if !strings.HasPrefix(opcode, "EXT") { + n = 1 + } + addr := common.BytesToAddress(StackBack(scope.StackData(), n).Bytes()) + + if _, ok := b.CurrentLevel.ContractSize[addr]; !ok && !b.isAllowedPrecompile(addr) { + b.CurrentLevel.ContractSize[addr] = &contractSizeVal{ + ContractSize: len(b.env.StateDB.GetCode(addr)), + Opcode: opcode, + } + } + } + + // [OP-012] + if b.lastOp == "GAS" && !strings.Contains(opcode, "CALL") { + b.incrementCount(b.CurrentLevel.Opcodes, "GAS") + } + // ignore "unimportant" opcodes + if opcode != "GAS" && !b.allowedOpcodeRegex.MatchString(opcode) { + b.incrementCount(b.CurrentLevel.Opcodes, opcode) + } + b.lastOp = opcode + + if opcode == "SLOAD" || opcode == "SSTORE" { + slot := common.BytesToHash(StackBack(scope.StackData(), 0).Bytes()) + slotHex := slot.Hex() + 
addr := scope.Address() + if _, ok := b.CurrentLevel.Access[addr]; !ok { + b.CurrentLevel.Access[addr] = &access{ + Reads: map[string]string{}, + Writes: map[string]uint64{}, + } + } + access := *b.CurrentLevel.Access[addr] + + if opcode == "SLOAD" { + // read slot values before this UserOp was created + // (so saving it if it was written before the first read) + _, rOk := access.Reads[slotHex] + _, wOk := access.Writes[slotHex] + if !rOk && !wOk { + access.Reads[slotHex] = b.env.StateDB.GetState(addr, slot).Hex() + } + } else { + b.incrementCount(access.Writes, slotHex) + } + } + + if opcode == "KECCAK256" { + // TODO: uncomment and fix with StackBack + // collect keccak on 64-byte blocks + // ofs := scope.Stack.Back(0).ToBig().Int64() + // len := scope.Stack.Back(1).ToBig().Int64() + // // currently, solidity uses only 2-word (6-byte) for a key. this might change..still, no need to + // // return too much + // if len > 20 && len < 512 { + // b.Keccak = append(b.Keccak, scope.Memory.GetCopy(ofs, len)) + // } + //} else if strings.HasPrefix(opcode, "LOG") { + // count, _ := strconv.Atoi(opcode[3:]) + // ofs := scope.Stack.Back(0).ToBig().Int64() + // len := scope.Stack.Back(1).ToBig().Int64() + // topics := []hexutil.Bytes{} + // for i := 0; i < count; i++ { + // topics = append(topics, scope.Stack.Back(2+i).Bytes()) + // } + // + // b.Logs = append(b.Logs, &logsItem{ + // Data: scope.Memory.GetCopy(ofs, len), + // Topic: topics, + // }) + } +} + +// StackBack returns the n-th item in stack +func StackBack(stackData []uint256.Int, n int) *uint256.Int { + return &stackData[len(stackData)-n-1] +} +func (b *rip7560ValidationTracer) isEXTorCALL(opcode string) bool { + return strings.HasPrefix(opcode, "EXT") || + opcode == "CALL" || + opcode == "CALLCODE" || + opcode == "DELEGATECALL" || + opcode == "STATICCALL" } -func (t *rip7560ValidationTracer) OnTxEnd(receipt *types.Receipt, err error) { +// not using 'isPrecompiled' to only allow the ones defined by the ERC-7562 
as stateless precompiles +// [OP-062] +func (b *rip7560ValidationTracer) isAllowedPrecompile(addr common.Address) bool { + addrInt := addr.Big() + return addrInt.Cmp(big.NewInt(0)) == 1 && addrInt.Cmp(big.NewInt(10)) == -1 } -func (t *rip7560ValidationTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { - opcodeName := vm.OpCode(opcode).String() - fmt.Printf("%s %d %d\n", opcodeName, cost, depth) - if t.UsedOpcodes[0] == nil { - t.UsedOpcodes[0] = make(map[string]bool) +func (b *rip7560ValidationTracer) incrementCount(m map[string]uint64, k string) { + if _, ok := m[k]; !ok { + m[k] = 0 } - t.UsedOpcodes[0][opcodeName] = true + m[k]++ } -func (t *rip7560ValidationTracer) GetResult() (json.RawMessage, error) { - jsonResult, err := json.MarshalIndent(*t, "", " ") +func (b *rip7560ValidationTracer) GetResult() (json.RawMessage, error) { + jsonResult, err := json.MarshalIndent(*b, "", " ") return jsonResult, err } -func (t *rip7560ValidationTracer) Stop(err error) { +func (b *rip7560ValidationTracer) Stop(err error) { } From 22c5f97370a749f4785784cc0b3cd1a986af1902 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 18 Jul 2024 08:50:41 +0200 Subject: [PATCH 39/73] Fix syntax error --- core/state_processor_rip7560.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 9dd022982669..8370f5046d8e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -146,7 +146,7 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St chargeFrom := st.Sender if st.Paymaster != nil && st.Paymaster.Cmp(common.Address{}) != 0 { - chargeFrom = *st.Paymaster + chargeFrom = st.Paymaster } if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { From 14634daac58478a9e5aa3b764c5b69cdd5b8dd4a Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 18 Jul 
2024 10:56:25 +0200 Subject: [PATCH 40/73] I don't understand why would we need & 0xFF here, please elaborate --- core/state_processor_rip7560.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 8370f5046d8e..ba1cdcf3e538 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -31,7 +31,7 @@ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validU t := new(big.Int).SetBytes(validationData) authorizerMagic = t.Uint64() - validUntil = t.Rsh(t, 160).Uint64() & 0xffffff + validUntil = t.Rsh(t, 160).Uint64() // & 0xffffff validAfter = t.Rsh(t, 48).Uint64() return } From 736e0e26387755231973b7e581b98f7af4a4e63e Mon Sep 17 00:00:00 2001 From: shahafn Date: Thu, 18 Jul 2024 20:45:50 +0300 Subject: [PATCH 41/73] Fixing validationData packing --- core/state_processor_rip7560.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index ba1cdcf3e538..2fc51deeb304 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -29,10 +29,9 @@ func PackValidationData(authorizerMagic uint64, validUntil, validAfter uint64) [ func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validUntil, validAfter uint64) { - t := new(big.Int).SetBytes(validationData) - authorizerMagic = t.Uint64() - validUntil = t.Rsh(t, 160).Uint64() // & 0xffffff - validAfter = t.Rsh(t, 48).Uint64() + authorizerMagic = new(big.Int).SetBytes(validationData[:20]).Uint64() + validUntil = new(big.Int).SetBytes(validationData[20:26]).Uint64() + validAfter = new(big.Int).SetBytes(validationData[26:32]).Uint64() return } From 171298568300ba9ea5a4a7a9d0e552b0aa0e916f Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 18 Jul 2024 23:09:15 +0200 Subject: [PATCH 42/73] Fix: do not lose the AA tx application error; fix lost 'nonce' value --- 
core/types/tx_rip7560.go | 2 +- internal/ethapi/transaction_args.go | 1 + miner/worker.go | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 76443ac0785b..59e279257014 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -106,7 +106,7 @@ func (tx *Rip7560AccountAbstractionTx) gasFeeCap() *big.Int { return tx.GasFe func (tx *Rip7560AccountAbstractionTx) gasTipCap() *big.Int { return tx.GasTipCap } func (tx *Rip7560AccountAbstractionTx) gasPrice() *big.Int { return tx.GasFeeCap } func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return tx.Value } -func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return 0 } +func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return tx.Nonce } func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return tx.To } func (tx *Rip7560AccountAbstractionTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 06b5ae60894a..f8b891e65655 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -493,6 +493,7 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { To: &common.Address{}, ChainID: (*big.Int)(args.ChainID), Gas: uint64(*args.Gas), + Nonce: uint64(*args.Nonce), GasFeeCap: (*big.Int)(args.MaxFeePerGas), GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), Value: (*big.Int)(args.Value), diff --git a/miner/worker.go b/miner/worker.go index c52a9c7eb092..2cab82899e1f 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -431,6 +431,7 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment) pendingBundle, err := miner.txpool.PendingRip7560Bundle() if pendingBundle != nil { if err = miner.commitRip7560TransactionsBundle(env, pendingBundle, interrupt); err != nil { + log.Error(err.Error()) return err } } From 9bb4fb03288d481c7c41390dc54a18b7a95e8192 
Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Sun, 21 Jul 2024 11:11:52 +0200 Subject: [PATCH 43/73] Move the 'TraceRip7560Validation' API to the 'eth' namespace, from the 'debug' --- eth/tracers/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/tracers/api.go b/eth/tracers/api.go index e04f503aeba6..0c2a6244b0f6 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1023,7 +1023,7 @@ func APIs(backend Backend) []rpc.API { Namespace: "debug", Service: NewAPI(backend), }, { - Namespace: "debug", + Namespace: "eth", Service: NewRip7560API(backend), }, } From 4dc333d54f18f346260800c214df272c1123558f Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Tue, 23 Jul 2024 14:51:10 +0300 Subject: [PATCH 44/73] check gas, nonce form ApplyRip7560ValidationPhases (#16) move validations into the validation phase. note that it breaks some tests which rely on wrong nonce. --- core/state_processor_rip7560.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 2fc51deeb304..f24f63c3a875 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -95,19 +95,9 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta break } - aatx := tx.Rip7560TransactionData() statedb.SetTxContext(tx.Hash(), index+i) - err := CheckNonceRip7560(aatx, statedb) - if err != nil { - return nil, nil, nil, err - } - err = BuyGasRip7560Transaction(aatx, statedb) - if err != nil { - return nil, nil, nil, err - } - var vpr *ValidationPhaseResult - vpr, err = ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) + vpr, err := ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) if err != nil { return nil, nil, nil, err } @@ -175,6 +165,16 @@ func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) } func 
ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { + aatx := tx.Rip7560TransactionData() + err := CheckNonceRip7560(aatx, statedb) + if err != nil { + return nil, err + } + err = BuyGasRip7560Transaction(aatx, statedb) + if err != nil { + return nil, err + } + blockContext := NewEVMBlockContext(header, bc, author) sender := tx.Rip7560TransactionData().Sender txContext := vm.TxContext{ From 7f93f508c1cd76a447c460186935a3791b507339 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Wed, 24 Jul 2024 16:06:03 +0300 Subject: [PATCH 45/73] fix nonce, entrypoint (#17) - add correct sender address for AA transactions (AA_ENTRY_POINT, and AA_CREATOR for the deployment frame) - disable nonce increment in TransitionDb if the sender is either of these magic addresses - add AA-specific nonce increment for the transaction sender. 
--- core/state_processor_rip7560.go | 13 +++++++++---- core/state_transition.go | 4 +++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index f24f63c3a875..90e16428801e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -19,6 +19,9 @@ const MAGIC_VALUE_PAYMASTER = uint64(0xe0e6183a) const MAGIC_VALUE_SIGFAIL = uint64(0x31665494) const PAYMASTER_MAX_CONTEXT_SIZE = 65536 +var AA_ENTRY_POINT = common.HexToAddress("0x0000000000000000000000000000000000007560") +var AA_SENDER_CREATOR = common.HexToAddress("0x00000000000000000000000000000000ffff7560") + func PackValidationData(authorizerMagic uint64, validUntil, validAfter uint64) []byte { t := new(big.Int).SetUint64(uint64(validAfter)) @@ -209,6 +212,8 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte return nil, fmt.Errorf("account deployment failed: %v", err) } statedb.IntermediateRoot(true) + } else { + statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) } /*** Account Validation Frame ***/ @@ -355,7 +360,7 @@ func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfi return nil } return &Message{ - From: config.DeployerCallerAddress, + From: AA_SENDER_CREATOR, To: tx.Deployer, Value: big.NewInt(0), GasLimit: tx.ValidationGas, @@ -382,7 +387,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par txAbiEncoding, err := tx.AbiEncode() validateTransactionData, err := validateTransactionAbi.Pack("validateTransaction", big.NewInt(0), signingHash, txAbiEncoding) return &Message{ - From: chainConfig.EntryPointAddress, + From: AA_ENTRY_POINT, To: tx.Sender, Value: big.NewInt(0), GasLimit: tx.ValidationGas - deploymentUsedGas, @@ -430,7 +435,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message 
{ tx := baseTx.Rip7560TransactionData() return &Message{ - From: config.EntryPointAddress, + From: AA_ENTRY_POINT, To: tx.Sender, Value: big.NewInt(0), GasLimit: tx.Gas, @@ -462,7 +467,7 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC return nil, err } return &Message{ - From: chainConfig.EntryPointAddress, + From: AA_ENTRY_POINT, To: tx.Paymaster, Value: big.NewInt(0), GasLimit: tx.PaymasterGas - executionResult.UsedGas, diff --git a/core/state_transition.go b/core/state_transition.go index 4fa2c1378642..5329386e131b 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -454,7 +454,9 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, value) } else { // Increment the nonce for the next transaction - st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) + if msg.From != AA_SENDER_CREATOR && msg.From != AA_ENTRY_POINT { + st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) + } ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, value) } From 52f50471f21db175c21318e5f09838d704275f19 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Thu, 25 Jul 2024 22:29:27 +0300 Subject: [PATCH 46/73] AA-403: Fix RIP-7560 transaction hash calculation (#19) --- core/types/tx_rip7560.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 59e279257014..7cb45aeda347 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -129,7 +129,18 @@ func (tx *Rip7560AccountAbstractionTx) setSignatureValues(chainID, v, r, s *big. 
} // encode the subtype byte and the payload-bearing bytes of the RIP-7560 transaction -func (tx *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { +func (t *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { + zeroAddress := common.Address{} + tx := t.copy().(*Rip7560AccountAbstractionTx) + if tx.Paymaster != nil && zeroAddress.Cmp(*tx.Paymaster) == 0 { + tx.Paymaster = nil + } + if tx.Deployer != nil && zeroAddress.Cmp(*tx.Deployer) == 0 { + tx.Deployer = nil + } + if tx.To != nil && zeroAddress.Cmp(*tx.To) == 0 { + tx.To = nil + } return rlp.Encode(b, tx) } From d450ade0af49fbccaed30f097edff525b3f3c3c6 Mon Sep 17 00:00:00 2001 From: shahafn Date: Mon, 29 Jul 2024 13:29:44 +0300 Subject: [PATCH 47/73] AA-378: Completing native tracer (#15) * Renaming, Adding tstore/tload support * Fixing tracer wip * Adding handling of value, onExit * Removing method id * Fix abi encoding of nil fields * typo * Removing logs --- core/state_processor_rip7560.go | 10 +- core/types/transaction_signing_rip7560.go | 4 +- core/types/tx_rip7560.go | 112 ++++++++++++---------- eth/tracers/native/rip7560_validation.go | 85 +++++++++++----- internal/ethapi/transaction_args.go | 42 ++++++-- tests/rip7560/deployer_test.go | 42 ++++---- tests/rip7560/paymaster_test.go | 68 ++++++------- tests/rip7560/process_test.go | 8 +- tests/rip7560/validation_test.go | 56 +++++------ 9 files changed, 251 insertions(+), 176 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 90e16428801e..72a89798261e 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -129,7 +129,7 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta // todo: move to a suitable interface, whatever that is // todo 2: maybe handle the "shared gas pool" situation instead of just overriding it completely? 
func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB) error { - gasLimit := st.Gas + st.ValidationGas + st.PaymasterGas + st.PostOpGas + gasLimit := st.Gas + st.ValidationGasLimit + st.PaymasterValidationGasLimit + st.PostOpGas mgval := new(uint256.Int).SetUint64(gasLimit) gasFeeCap, _ := uint256.FromBig(st.GasFeeCap) mgval = mgval.Mul(mgval, gasFeeCap) @@ -363,7 +363,7 @@ func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfi From: AA_SENDER_CREATOR, To: tx.Deployer, Value: big.NewInt(0), - GasLimit: tx.ValidationGas, + GasLimit: tx.ValidationGasLimit, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, @@ -390,7 +390,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par From: AA_ENTRY_POINT, To: tx.Sender, Value: big.NewInt(0), - GasLimit: tx.ValidationGas - deploymentUsedGas, + GasLimit: tx.ValidationGasLimit - deploymentUsedGas, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, @@ -421,7 +421,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params From: config.EntryPointAddress, To: tx.Paymaster, Value: big.NewInt(0), - GasLimit: tx.PaymasterGas, + GasLimit: tx.PaymasterValidationGasLimit, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, @@ -470,7 +470,7 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC From: AA_ENTRY_POINT, To: tx.Paymaster, Value: big.NewInt(0), - GasLimit: tx.PaymasterGas - executionResult.UsedGas, + GasLimit: tx.PaymasterValidationGasLimit - executionResult.UsedGas, GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, diff --git a/core/types/transaction_signing_rip7560.go b/core/types/transaction_signing_rip7560.go index 0a84b46e56ad..79ef14efaebc 100644 --- a/core/types/transaction_signing_rip7560.go +++ b/core/types/transaction_signing_rip7560.go @@ -40,7 +40,7 @@ func (s rip7560Signer) Hash(tx 
*Transaction) common.Hash { aatx.PaymasterData, aatx.DeployerData, aatx.BuilderFee, - aatx.ValidationGas, - aatx.PaymasterGas, + aatx.ValidationGasLimit, + aatx.PaymasterValidationGasLimit, }) } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 7cb45aeda347..39b6960664ca 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -35,16 +35,16 @@ type Rip7560AccountAbstractionTx struct { AccessList AccessList // extra fields - Sender *common.Address - Signature []byte - Paymaster *common.Address `rlp:"nil"` - PaymasterData []byte - Deployer *common.Address `rlp:"nil"` - DeployerData []byte - BuilderFee *big.Int - ValidationGas uint64 - PaymasterGas uint64 - PostOpGas uint64 + Sender *common.Address + Signature []byte + Paymaster *common.Address `rlp:"nil"` + PaymasterData []byte + Deployer *common.Address `rlp:"nil"` + DeployerData []byte + BuilderFee *big.Int + ValidationGasLimit uint64 + PaymasterValidationGasLimit uint64 + PostOpGas uint64 // removed fields To *common.Address `rlp:"nil"` @@ -66,16 +66,16 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { GasTipCap: new(big.Int), GasFeeCap: new(big.Int), - Sender: copyAddressPtr(tx.Sender), - Signature: common.CopyBytes(tx.Signature), - Paymaster: copyAddressPtr(tx.Paymaster), - PaymasterData: common.CopyBytes(tx.PaymasterData), - Deployer: copyAddressPtr(tx.Deployer), - DeployerData: common.CopyBytes(tx.DeployerData), - BuilderFee: new(big.Int), - ValidationGas: tx.ValidationGas, - PaymasterGas: tx.PaymasterGas, - PostOpGas: tx.PostOpGas, + Sender: copyAddressPtr(tx.Sender), + Signature: common.CopyBytes(tx.Signature), + Paymaster: copyAddressPtr(tx.Paymaster), + PaymasterData: common.CopyBytes(tx.PaymasterData), + Deployer: copyAddressPtr(tx.Deployer), + DeployerData: common.CopyBytes(tx.DeployerData), + BuilderFee: new(big.Int), + ValidationGasLimit: tx.ValidationGasLimit, + PaymasterValidationGasLimit: tx.PaymasterValidationGasLimit, + PostOpGas: tx.PostOpGas, } 
copy(cpy.AccessList, tx.AccessList) if tx.Value != nil { @@ -151,21 +151,21 @@ func (tx *Rip7560AccountAbstractionTx) decode(input []byte) error { // Rip7560Transaction an equivalent of a solidity struct only used to encode the 'transaction' parameter type Rip7560Transaction struct { - Sender common.Address - Nonce *big.Int - ValidationGasLimit *big.Int - PaymasterGasLimit *big.Int - PostOpGasLimit *big.Int - CallGasLimit *big.Int - MaxFeePerGas *big.Int - MaxPriorityFeePerGas *big.Int - BuilderFee *big.Int - Paymaster *common.Address - PaymasterData []byte - Deployer *common.Address - DeployerData []byte - CallData []byte - Signature []byte + Sender common.Address + Nonce *big.Int + ValidationGasLimit *big.Int + PaymasterValidationGasLimit *big.Int + PostOpGasLimit *big.Int + CallGasLimit *big.Int + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int + BuilderFee *big.Int + Paymaster common.Address + PaymasterData []byte + Deployer common.Address + DeployerData []byte + CallData []byte + Signature []byte } func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { @@ -173,12 +173,15 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { {Name: "sender", Type: "address"}, {Name: "nonce", Type: "uint256"}, {Name: "validationGasLimit", Type: "uint256"}, - {Name: "paymasterGasLimit", Type: "uint256"}, + {Name: "paymasterValidationGasLimit", Type: "uint256"}, + {Name: "postOpGasLimit", Type: "uint256"}, {Name: "callGasLimit", Type: "uint256"}, {Name: "maxFeePerGas", Type: "uint256"}, {Name: "maxPriorityFeePerGas", Type: "uint256"}, {Name: "builderFee", Type: "uint256"}, + {Name: "paymaster", Type: "address"}, {Name: "paymasterData", Type: "bytes"}, + {Name: "deployer", Type: "address"}, {Name: "deployerData", Type: "bytes"}, {Name: "callData", Type: "bytes"}, {Name: "signature", Type: "bytes"}, @@ -187,19 +190,32 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { args := abi.Arguments{ {Type: structThing, Name: 
"param_one"}, } + + paymaster := tx.Paymaster + if paymaster == nil { + paymaster = &common.Address{} + } + deployer := tx.Deployer + if deployer == nil { + deployer = &common.Address{} + } + record := &Rip7560Transaction{ - Sender: *tx.Sender, - Nonce: big.NewInt(int64(tx.Nonce)), - ValidationGasLimit: big.NewInt(int64(tx.ValidationGas)), - PaymasterGasLimit: big.NewInt(int64(tx.PaymasterGas)), - CallGasLimit: big.NewInt(int64(tx.Gas)), - MaxFeePerGas: tx.GasFeeCap, - MaxPriorityFeePerGas: tx.GasTipCap, - BuilderFee: tx.BuilderFee, - PaymasterData: tx.PaymasterData, - DeployerData: tx.DeployerData, - CallData: tx.Data, - Signature: tx.Signature, + Sender: *tx.Sender, + Nonce: big.NewInt(int64(tx.Nonce)), + ValidationGasLimit: big.NewInt(int64(tx.ValidationGasLimit)), + PaymasterValidationGasLimit: big.NewInt(int64(tx.PaymasterValidationGasLimit)), + PostOpGasLimit: big.NewInt(int64(tx.PostOpGas)), + CallGasLimit: big.NewInt(int64(tx.Gas)), + MaxFeePerGas: tx.GasFeeCap, + MaxPriorityFeePerGas: tx.GasTipCap, + BuilderFee: tx.BuilderFee, + Paymaster: *paymaster, + PaymasterData: tx.PaymasterData, + Deployer: *deployer, + DeployerData: tx.DeployerData, + CallData: tx.Data, + Signature: tx.Signature, } packed, err := args.Pack(&record) return packed, err diff --git a/eth/tracers/native/rip7560_validation.go b/eth/tracers/native/rip7560_validation.go index 9cdc90e2b34c..504c68d36961 100644 --- a/eth/tracers/native/rip7560_validation.go +++ b/eth/tracers/native/rip7560_validation.go @@ -11,6 +11,7 @@ import ( "github.com/holiman/uint256" "math/big" "regexp" + "strconv" "strings" ) @@ -33,8 +34,10 @@ type contractSizeVal struct { } type access struct { - Reads map[string]string `json:"reads"` - Writes map[string]uint64 `json:"writes"` + Reads map[string]string `json:"reads"` + Writes map[string]uint64 `json:"writes"` + TransientReads map[string]uint64 `json:"transientReads"` + TransientWrites map[string]uint64 `json:"transientWrites"` } // note - this means an individual 
'frame' in 7560 (validate, execute, postOp) @@ -87,6 +90,7 @@ func newRip7560Tracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trace OnTxStart: t.OnTxStart, OnTxEnd: t.OnTxEnd, OnOpcode: t.OnOpcode, + OnExit: t.OnExit, }, GetResult: t.GetResult, Stop: t.Stop, @@ -142,6 +146,27 @@ func (b *rip7560ValidationTracer) OnEnter(depth int, typ byte, from common.Addre if depth == 0 { b.createNewTopLevelFrame(to) } + b.Calls = append(b.Calls, &callsItem{ + Type: vm.OpCode(typ).String(), + From: from, + To: to, + //Method: input[0:10], + Value: (*hexutil.Big)(value), + Gas: gas, + Data: input, + }) +} + +func (b *rip7560ValidationTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + typ := "RETURN" + if err != nil { + typ = "REVERT" + } + b.Calls = append(b.Calls, &callsItem{ + Type: typ, + GasUsed: gasUsed, + Data: output, + }) } func (b *rip7560ValidationTracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { @@ -251,14 +276,16 @@ func (b *rip7560ValidationTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, } b.lastOp = opcode - if opcode == "SLOAD" || opcode == "SSTORE" { + if opcode == "SLOAD" || opcode == "SSTORE" || opcode == "TLOAD" || opcode == "TSTORE" { slot := common.BytesToHash(StackBack(scope.StackData(), 0).Bytes()) slotHex := slot.Hex() addr := scope.Address() if _, ok := b.CurrentLevel.Access[addr]; !ok { b.CurrentLevel.Access[addr] = &access{ - Reads: map[string]string{}, - Writes: map[string]uint64{}, + Reads: map[string]string{}, + Writes: map[string]uint64{}, + TransientReads: map[string]uint64{}, + TransientWrites: map[string]uint64{}, } } access := *b.CurrentLevel.Access[addr] @@ -271,34 +298,44 @@ func (b *rip7560ValidationTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, if !rOk && !wOk { access.Reads[slotHex] = b.env.StateDB.GetState(addr, slot).Hex() } - } else { + } else if opcode == "SSTORE" { b.incrementCount(access.Writes, slotHex) + } else if opcode == "TLOAD" { + 
b.incrementCount(access.TransientReads, slotHex) + } else if opcode == "TSTORE" { + b.incrementCount(access.TransientWrites, slotHex) } } if opcode == "KECCAK256" { // TODO: uncomment and fix with StackBack // collect keccak on 64-byte blocks - // ofs := scope.Stack.Back(0).ToBig().Int64() - // len := scope.Stack.Back(1).ToBig().Int64() + ofs := StackBack(scope.StackData(), 0) + len := StackBack(scope.StackData(), 1) + memory := scope.MemoryData() // // currently, solidity uses only 2-word (6-byte) for a key. this might change..still, no need to // // return too much - // if len > 20 && len < 512 { - // b.Keccak = append(b.Keccak, scope.Memory.GetCopy(ofs, len)) - // } - //} else if strings.HasPrefix(opcode, "LOG") { - // count, _ := strconv.Atoi(opcode[3:]) - // ofs := scope.Stack.Back(0).ToBig().Int64() - // len := scope.Stack.Back(1).ToBig().Int64() - // topics := []hexutil.Bytes{} - // for i := 0; i < count; i++ { - // topics = append(topics, scope.Stack.Back(2+i).Bytes()) - // } - // - // b.Logs = append(b.Logs, &logsItem{ - // Data: scope.Memory.GetCopy(ofs, len), - // Topic: topics, - // }) + if len.Uint64() > 20 && len.Uint64() < 512 { + keccak := make([]byte, len.Uint64()) + copy(keccak, memory[ofs.Uint64():ofs.Uint64()+len.Uint64()]) + b.Keccak = append(b.Keccak, keccak) + } + } else if strings.HasPrefix(opcode, "LOG") { + count, _ := strconv.Atoi(opcode[3:]) + ofs := StackBack(scope.StackData(), 0) + len := StackBack(scope.StackData(), 1) + memory := scope.MemoryData() + topics := []hexutil.Bytes{} + for i := 0; i < count; i++ { + topics = append(topics, StackBack(scope.StackData(), 2+i).Bytes()) + //topics = append(topics, scope.Stack.Back(2+i).Bytes()) + } + log := make([]byte, len.Uint64()) + copy(log, memory[ofs.Uint64():ofs.Uint64()+len.Uint64()]) + b.Logs = append(b.Logs, &logsItem{ + Data: log, + Topic: topics, + }) } } diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index f8b891e65655..83a70d32489a 100644 
--- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -115,6 +115,9 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGas if err := args.setFeeDefaults(ctx, b); err != nil { return err } + if err := args.set7560Defaults(ctx, b); err != nil { + return err + } if args.Value == nil { args.Value = new(hexutil.Big) @@ -194,6 +197,25 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGas return nil } +func (args *TransactionArgs) set7560Defaults(ctx context.Context, b Backend) error { + // Not 7560 tx + if args.Sender == nil { + return nil + } + if args.Paymaster == nil { + log.Error("set7560Defaults setting default paymaster fields") + args.Paymaster = &common.Address{} + args.PaymasterData = &hexutil.Bytes{} + } + if args.Deployer == nil { + log.Error("set7560Defaults setting default deployer fields") + args.Deployer = &common.Address{} + args.DeployerData = &hexutil.Bytes{} + } + return nil + +} + // setFeeDefaults fills in default fee values for unspecified tx fields. 
func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) error { head := b.CurrentHeader() @@ -500,16 +522,16 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { Data: args.data(), AccessList: al, // RIP-7560 parameters - Sender: args.Sender, - Signature: *args.Signature, - Paymaster: args.Paymaster, - PaymasterData: *args.PaymasterData, - Deployer: args.Deployer, - DeployerData: *args.DeployerData, - BuilderFee: (*big.Int)(args.BuilderFee), - ValidationGas: uint64(*args.ValidationGas), - PaymasterGas: uint64(*args.PaymasterGas), - PostOpGas: uint64(*args.PostOpGas), + Sender: args.Sender, + Signature: *args.Signature, + Paymaster: args.Paymaster, + PaymasterData: *args.PaymasterData, + Deployer: args.Deployer, + DeployerData: *args.DeployerData, + BuilderFee: (*big.Int)(args.BuilderFee), + ValidationGasLimit: uint64(*args.ValidationGas), + PaymasterValidationGasLimit: uint64(*args.PaymasterGas), + PostOpGas: uint64(*args.PostOpGas), } data = &aatx hash := types.NewTx(data).Hash() diff --git a/tests/rip7560/deployer_test.go b/tests/rip7560/deployer_test.go index e07ea49ef14b..dfcf4105eef8 100644 --- a/tests/rip7560/deployer_test.go +++ b/tests/rip7560/deployer_test.go @@ -14,9 +14,9 @@ func TestValidationFailure_deployerRevert(t *testing.T) { withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). withCode(DEPLOYER.Hex(), revertWithData([]byte{}), 0), types.Rip7560AccountAbstractionTx{ - Deployer: &DEPLOYER, - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), + Deployer: &DEPLOYER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), }, "account deployment failed: execution reverted") } @@ -25,9 +25,9 @@ func TestValidationFailure_deployerOOG(t *testing.T) { withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). 
withCode(DEPLOYER.Hex(), revertWithData([]byte{}), 0), types.Rip7560AccountAbstractionTx{ - Deployer: &DEPLOYER, - ValidationGas: 1, - GasFeeCap: big.NewInt(1000000000), + Deployer: &DEPLOYER, + ValidationGasLimit: 1, + GasFeeCap: big.NewInt(1000000000), }, "account deployment failed: out of gas") } @@ -36,9 +36,9 @@ func TestValidationFailure_senderNotDeployed(t *testing.T) { withCode(DEFAULT_SENDER, []byte{}, DEFAULT_BALANCE). withCode(DEPLOYER.Hex(), returnWithData([]byte{}), 0), types.Rip7560AccountAbstractionTx{ - Deployer: &DEPLOYER, - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), + Deployer: &DEPLOYER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), }, "account deployment failed: sender not deployed") } @@ -50,10 +50,10 @@ func TestValidationFailure_senderAlreadyDeployed(t *testing.T) { withCode(sender.Hex(), accountCode, DEFAULT_BALANCE). withCode(DEPLOYER.Hex(), deployerCode, 0), types.Rip7560AccountAbstractionTx{ - Sender: &sender, - Deployer: &DEPLOYER, - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), }, "account deployment failed: sender already deployed") } @@ -65,10 +65,10 @@ func TestValidationFailure_senderReverts(t *testing.T) { withCode(sender.Hex(), []byte{}, DEFAULT_BALANCE). withCode(DEPLOYER.Hex(), deployerCode, 0), types.Rip7560AccountAbstractionTx{ - Sender: &sender, - Deployer: &DEPLOYER, - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), }, "execution reverted") } @@ -80,9 +80,9 @@ func TestValidation_deployer_ok(t *testing.T) { withCode(sender.Hex(), []byte{}, DEFAULT_BALANCE). 
withCode(DEPLOYER.Hex(), deployerCode, 0), types.Rip7560AccountAbstractionTx{ - Sender: &sender, - Deployer: &DEPLOYER, - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), + Sender: &sender, + Deployer: &DEPLOYER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), }, "ok") } diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index b169e0be239c..9e6422624552 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -15,9 +15,9 @@ func TestPaymasterValidationFailure_nobalance(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), 1), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "insufficient funds for gas * price + value: address 0xaaAaaAAAAAbBbbbbBbBBCCCCcCCCcCdddDDDdddd have 1 want 1000000000000000000") } @@ -25,19 +25,19 @@ func TestPaymasterValidationFailure_oog(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "out of gas") } func TestPaymasterValidationFailure_revert(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
withCode(DEFAULT_PAYMASTER.String(), createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, - PaymasterGas: 1000000000, + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, + PaymasterValidationGasLimit: 1000000000, }, "execution reverted") } @@ -45,20 +45,20 @@ func TestPaymasterValidationFailure_unparseable_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "paymaster return data: too short") } func TestPaymasterValidationFailure_wrong_magic(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(1, 2, 3, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "paymaster did not return correct MAGIC_VALUE") } @@ -77,39 +77,39 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
withCode(DEFAULT_PAYMASTER.String(), pmCode, DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "paymaster return data: context too large") } func TestPaymasterValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 300, 200, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "RIP-7560 transaction validity not reached yet") } func TestPaymasterValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 1, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "RIP-7560 transaction validity expired") } func TestPaymasterValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: 1000000000, - PaymasterGas: 1000000000, - GasFeeCap: big.NewInt(1000000000), - Paymaster: &DEFAULT_PAYMASTER, + ValidationGasLimit: 1000000000, + PaymasterValidationGasLimit: 1000000000, + GasFeeCap: big.NewInt(1000000000), + Paymaster: &DEFAULT_PAYMASTER, }, "ok") } diff --git a/tests/rip7560/process_test.go b/tests/rip7560/process_test.go index c3973aaa7b09..9ddb0fca08e5 100644 --- a/tests/rip7560/process_test.go +++ b/tests/rip7560/process_test.go @@ -42,10 +42,10 @@ func TestProcess1(t *testing.T) { withCode(DEFAULT_SENDER, createAccountCode(), 1000000000000000000). build(), []*types.Rip7560AccountAbstractionTx{ { - Sender: &Sender, - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), - Data: []byte{1, 2, 3}, + Sender: &Sender, + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), + Data: []byte{1, 2, 3}, }, }) assert.NoError(t, err) diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index c7a75fdc9127..2c5bd24321b3 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -30,23 +30,23 @@ func TestUnpackValidationData(t *testing.T) { func TestValidationFailure_OOG(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1), + GasFeeCap: big.NewInt(1000000000), }, "out of gas") } func TestValidationFailure_no_balance(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 1), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1), + GasFeeCap: 
big.NewInt(1000000000), }, "insufficient funds for gas * price + value: address 0x1111111111222222222233333333334444444444 have 1 want 1000000000") } func TestValidationFailure_sigerror(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "account signature error") } @@ -54,8 +54,8 @@ func TestValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity not reached yet") } @@ -63,45 +63,45 @@ func TestValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity expired") } func TestValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "ok") } func TestValidation_ok_paid(t *testing.T) { aatx := types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - 
GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), } tb := newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE) handleTransaction(tb, aatx, "ok") - maxCost := new(big.Int).SetUint64(aatx.ValidationGas + aatx.PaymasterGas + aatx.Gas) + maxCost := new(big.Int).SetUint64(aatx.ValidationGasLimit + aatx.PaymasterValidationGasLimit + aatx.Gas) maxCost.Mul(maxCost, aatx.GasFeeCap) } func TestValidationFailure_account_nonce(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - Nonce: 1234, - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + Nonce: 1234, + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "nonce too high: address 0x1111111111222222222233333333334444444444, tx: 1234 state: 0") } func TestValidationFailure_account_revert(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createCode(vm.PUSH0, vm.DUP1, vm.REVERT), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "execution reverted") } @@ -110,24 +110,24 @@ func TestValidationFailure_account_revert_with_reason(t *testing.T) { reason := hexutils.HexToBytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000568656c6c6f000000000000000000000000000000000000000000000000000000") handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, revertWithData(reason), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "execution reverted") } 
func TestValidationFailure_account_wrong_return_length(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData([]byte{1, 2, 3}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") } func TestValidationFailure_account_no_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData([]byte{}), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "invalid account return data length") } @@ -136,8 +136,8 @@ func TestValidationFailure_account_wrong_return_value(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(make([]byte, 32)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ - ValidationGas: uint64(1000000000), - GasFeeCap: big.NewInt(1000000000), + ValidationGasLimit: uint64(1000000000), + GasFeeCap: big.NewInt(1000000000), }, "account did not return correct MAGIC_VALUE") } From 2ee6caf9c5d40210f7bb000be43ba503716c4e5b Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sun, 4 Aug 2024 12:54:48 +0300 Subject: [PATCH 48/73] AA-394 receipt fields (#21) * handle transaction receipt status effectiveGasUsed refund excess to account/paymaster --- core/state_processor_rip7560.go | 75 +++++++++++++++++++++--------- core/types/receipt.go | 3 +- eth/tracers/api_tracing_rip7560.go | 4 -- 3 files changed, 55 insertions(+), 27 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 72a89798261e..b36979eef2d1 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -6,6 +6,7 @@ import ( 
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -61,6 +62,8 @@ type ValidationPhaseResult struct { Tx *types.Transaction TxHash common.Hash PaymasterContext []byte + PreCharge *uint256.Int + EffectiveGasPrice *uint256.Int DeploymentUsedGas uint64 ValidationUsedGas uint64 PmValidationUsedGas uint64 @@ -119,21 +122,21 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta if err != nil { return nil, nil, nil, err } + statedb.Finalise(true) + receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) } return validatedTransactions, receipts, allLogs, nil } -// BuyGasRip7560Transaction // todo: move to a suitable interface, whatever that is // todo 2: maybe handle the "shared gas pool" situation instead of just overriding it completely? 
-func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB) error { +func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB, gasPrice *uint256.Int) (*uint256.Int, error) { gasLimit := st.Gas + st.ValidationGasLimit + st.PaymasterValidationGasLimit + st.PostOpGas - mgval := new(uint256.Int).SetUint64(gasLimit) - gasFeeCap, _ := uint256.FromBig(st.GasFeeCap) - mgval = mgval.Mul(mgval, gasFeeCap) - balanceCheck := new(uint256.Int).Set(mgval) + preCharge := new(uint256.Int).SetUint64(gasLimit) + preCharge = preCharge.Mul(preCharge, gasPrice) + balanceCheck := new(uint256.Int).Set(preCharge) chargeFrom := st.Sender @@ -142,11 +145,27 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St } if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { - return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) + return nil, fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } - state.SubBalance(*chargeFrom, mgval, 0) - return nil + state.SubBalance(*chargeFrom, preCharge, 0) + return preCharge, nil +} + +// refund the transaction payer (either account or paymaster) with the excess gas cost +func refundPayer(vpr *ValidationPhaseResult, state vm.StateDB, gasUsed uint64) { + var chargeFrom *common.Address + if vpr.PmValidationUsedGas == 0 { + chargeFrom = vpr.Tx.Rip7560TransactionData().Sender + } else { + chargeFrom = vpr.Tx.Rip7560TransactionData().Paymaster + } + + actualGasCost := new(uint256.Int).Mul(vpr.EffectiveGasPrice, new(uint256.Int).SetUint64(gasUsed)) + + refund := new(uint256.Int).Sub(vpr.PreCharge, actualGasCost) + + state.AddBalance(*chargeFrom, refund, tracing.BalanceIncreaseGasReturn) } // precheck nonce of transaction. 
@@ -173,7 +192,14 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if err != nil { return nil, err } - err = BuyGasRip7560Transaction(aatx, statedb) + + gasPrice := new(big.Int).Add(header.BaseFee, tx.GasTipCap()) + if gasPrice.Cmp(tx.GasFeeCap()) > 0 { + gasPrice = tx.GasFeeCap() + } + gasPriceUint256, _ := uint256.FromBig(gasPrice) + + preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) if err != nil { return nil, err } @@ -182,7 +208,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte sender := tx.Rip7560TransactionData().Sender txContext := vm.TxContext{ Origin: *sender, - GasPrice: tx.GasFeeCap(), + GasPrice: gasPrice, } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) @@ -211,7 +237,6 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if err != nil { return nil, fmt.Errorf("account deployment failed: %v", err) } - statedb.IntermediateRoot(true) } else { statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) } @@ -224,7 +249,6 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if err != nil { return nil, err } - statedb.IntermediateRoot(true) if resultAccountValidation.Err != nil { return nil, resultAccountValidation.Err } @@ -241,9 +265,12 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if err != nil { return nil, err } + vpr := &ValidationPhaseResult{ Tx: tx, TxHash: tx.Hash(), + PreCharge: preCharge, + EffectiveGasPrice: gasPriceUint256, PaymasterContext: paymasterContext, DeploymentUsedGas: deploymentUsedGas, ValidationUsedGas: resultAccountValidation.UsedGas, @@ -253,6 +280,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte PmValidAfter: pmValidAfter, PmValidUntil: pmValidUntil, } + statedb.Finalise(true) return vpr, nil } @@ -275,7 +303,6 @@ func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig 
*params.Ch if resultPm.Failed() { return nil, 0, 0, 0, resultPm.Err } - statedb.IntermediateRoot(true) if resultPm.Failed() { return nil, 0, 0, 0, errors.New("paymaster validation failed - invalid transaction") } @@ -321,36 +348,40 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase if err != nil { return nil, err } - root := statedb.IntermediateRoot(true).Bytes() var paymasterPostOpResult *ExecutionResult if len(vpr.PaymasterContext) != 0 { paymasterPostOpResult, err = applyPaymasterPostOpFrame(vpr, executionResult, evm, gp, statedb, header) - root = statedb.IntermediateRoot(true).Bytes() } if err != nil { return nil, err } - cumulativeGasUsed := + gasUsed := vpr.ValidationUsedGas + vpr.DeploymentUsedGas + vpr.PmValidationUsedGas + executionResult.UsedGas if paymasterPostOpResult != nil { - cumulativeGasUsed += + gasUsed += paymasterPostOpResult.UsedGas } - receipt := &types.Receipt{Type: vpr.Tx.Type(), PostState: root, CumulativeGasUsed: cumulativeGasUsed} - - // Set the receipt logs and create the bloom filter. - receipt.Logs = statedb.GetLogs(vpr.Tx.Hash(), header.Number.Uint64(), header.Hash()) + receipt := &types.Receipt{Type: vpr.Tx.Type(), TxHash: vpr.Tx.Hash(), GasUsed: gasUsed, CumulativeGasUsed: gasUsed} if executionResult.Failed() || (paymasterPostOpResult != nil && paymasterPostOpResult.Failed()) { receipt.Status = types.ReceiptStatusFailed } else { receipt.Status = types.ReceiptStatusSuccessful } + + refundPayer(vpr, statedb, gasUsed) + + // Set the receipt logs and create the bloom filter. 
+ blockNumber := header.Number + receipt.Logs = statedb.GetLogs(vpr.TxHash, blockNumber.Uint64(), common.Hash{}) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipt.TransactionIndex = uint(vpr.TxIndex) + // other fields are filled in DeriveFields (all tx, block fields, and updating CumulativeGasUsed return receipt, err } diff --git a/core/types/receipt.go b/core/types/receipt.go index 4f96fde59c44..2de994d8234f 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -348,7 +348,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu rs[i].TransactionIndex = uint(i) // The contract address can be derived from the transaction itself - if txs[i].To() == nil { + // AA transactions always have "sender" as the account address, regardless if it is created by this TX + if txs[i].To() == nil && txs[i].Type() != Rip7560Type { // Deriving the signer is expensive, only do if it's actually needed from, _ := Sender(signer, txs[i]) rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go index f57e88889c6f..315e825ec3dd 100644 --- a/eth/tracers/api_tracing_rip7560.go +++ b/eth/tracers/api_tracing_rip7560.go @@ -2,7 +2,6 @@ package tracers import ( "context" - "encoding/json" "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -12,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/rpc" - "log" "math/big" "time" ) @@ -61,8 +59,6 @@ func (api *Rip7560API) TraceRip7560Validation( if err != nil { return nil, err } - log.Println("TraceRip7560Validation result") - log.Println(string(traceResult.(json.RawMessage))) return traceResult, err } From 0d8f19200d538d290833e36c78946c8ea7c7032a Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 5 Aug 2024 17:21:37 +0300 Subject: [PATCH 49/73] AA-381: Create a "pull bundle" block building 
mode and a 'getRip7560Bundle' method (#18) --- circleciconfig.toml | 5 ++ core/txpool/rip7560pool/rip7560pool.go | 76 +++++++++++++++++++++++++- core/txpool/txpool_rip7560.go | 2 +- eth/api_backend.go | 1 + eth/api_backend_rip7560.go | 4 ++ eth/backend.go | 7 ++- eth/ethconfig/config.go | 12 ++++ eth/ethconfig/gen_config.go | 24 ++++++++ internal/ethapi/rip7560api.go | 10 +++- 9 files changed, 131 insertions(+), 10 deletions(-) create mode 100644 circleciconfig.toml diff --git a/circleciconfig.toml b/circleciconfig.toml new file mode 100644 index 000000000000..edc119fa42ad --- /dev/null +++ b/circleciconfig.toml @@ -0,0 +1,5 @@ +[Eth] +Rip7560MaxBundleSize = 0 +Rip7560MaxBundleGas = 0 +Rip7560PullUrls = ["http://localhost:3001/rpc"] +Rip7560AcceptPush = false diff --git a/core/txpool/rip7560pool/rip7560pool.go b/core/txpool/rip7560pool/rip7560pool.go index 19bcad417d3f..b93b1d20ca85 100644 --- a/core/txpool/rip7560pool/rip7560pool.go +++ b/core/txpool/rip7560pool/rip7560pool.go @@ -1,21 +1,30 @@ package rip7560pool import ( + "context" + "errors" + "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" "math/big" + "net/http" "sync" "sync/atomic" + "time" ) type Config struct { - MaxBundleSize uint - MaxBundleGas uint + MaxBundleSize *uint64 + MaxBundleGas *uint64 + PullUrls []string } // Rip7560BundlerPool is the transaction pool dedicated to RIP-7560 AA transactions. 
@@ -189,7 +198,10 @@ func (pool *Rip7560BundlerPool) PendingRip7560Bundle() (*types.ExternallyReceive defer pool.mu.Unlock() bundle := pool.selectExternalBundle() - return bundle, nil + if bundle != nil { + return bundle, nil + } + return pool.fetchBundleFromBundler() } // SubscribeTransactions is not needed for the External Bundler AA sub pool and 'ch' will never be sent anything. @@ -261,6 +273,64 @@ func (pool *Rip7560BundlerPool) GetRip7560BundleStatus(hash common.Hash) (*types return pool.includedBundles[hash], nil } +type GetRip7560BundleArgs struct { + MinBaseFee uint64 + MaxBundleGas uint64 + MaxBundleSize uint64 +} + +type GetRip7560BundleResult struct { + Bundle []ethapi.TransactionArgs + ValidForBlock *hexutil.Big +} + +func (pool *Rip7560BundlerPool) fetchBundleFromBundler() (*types.ExternallyReceivedBundle, error) { + if len(pool.config.PullUrls) == 0 { + return nil, nil + } + currentHead := pool.currentHead.Load() + chosenBundle := make([]ethapi.TransactionArgs, 0) + pullErrors := make([]error, 0) + for _, url := range pool.config.PullUrls { + client := rpc.WithHTTPClient(&http.Client{Timeout: 500 * time.Millisecond}) + cl, err := rpc.DialOptions(context.Background(), url, client) + if err != nil { + log.Warn(fmt.Sprintf("Failed to dial RIP-7560 bundler URL (%s): %v", url, err)) + } + maxBundleGas := min(*pool.config.MaxBundleGas, currentHead.GasLimit) + args := &GetRip7560BundleArgs{ + MinBaseFee: currentHead.BaseFee.Uint64(), // todo: adjust to account for possible change! 
+ MaxBundleGas: maxBundleGas, + MaxBundleSize: *pool.config.MaxBundleSize, + } + result := &GetRip7560BundleResult{ + Bundle: make([]ethapi.TransactionArgs, 0), + } + err = cl.Call(result, "aa_getRip7560Bundle", args) + if err != nil { + log.Warn(fmt.Sprintf("Failed to fetch RIP-7560 bundle from URL (%s): %v", url, err)) + pullErrors = append(pullErrors, err) + continue + } + chosenBundle = result.Bundle + break + } + if len(pullErrors) == len(pool.config.PullUrls) { + return nil, errors.New("failed to fetch a new RIP-7560 bundle from any bundler") + } + txs := make([]*types.Transaction, len(chosenBundle)) + for i, tx := range chosenBundle { + txs[i] = tx.ToTransaction() + } + bundleHash := ethapi.CalculateBundleHash(txs) + return &types.ExternallyReceivedBundle{ + BundlerId: "result.String", + BundleHash: bundleHash, + ValidForBlock: big.NewInt(0), + Transactions: txs, + }, nil +} + // return first bundle func (pool *Rip7560BundlerPool) selectExternalBundle() *types.ExternallyReceivedBundle { if len(pool.pendingBundles) == 0 { diff --git a/core/txpool/txpool_rip7560.go b/core/txpool/txpool_rip7560.go index 5e5ef8f8e7ee..2d3ab0c24359 100644 --- a/core/txpool/txpool_rip7560.go +++ b/core/txpool/txpool_rip7560.go @@ -5,7 +5,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// SubmitBundle inserts the entire bundle of Type 4 transactions into the relevant pool. +// SubmitRip7560Bundle inserts the entire bundle of Type 4 transactions into the relevant pool. 
func (p *TxPool) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error { // todo: we cannot 'filter-out' the AA pool so just passing to all pools - only AA pool has code in SubmitBundle for _, subpool := range p.subpools { diff --git a/eth/api_backend.go b/eth/api_backend.go index 8a9898b956f3..3262661c5585 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -44,6 +44,7 @@ import ( // EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes type EthAPIBackend struct { + rip7560AcceptPush bool extRPCEnabled bool allowUnprotectedTxs bool eth *Ethereum diff --git a/eth/api_backend_rip7560.go b/eth/api_backend_rip7560.go index 4a8ad2f36e6c..359e8e7166cf 100644 --- a/eth/api_backend_rip7560.go +++ b/eth/api_backend_rip7560.go @@ -2,11 +2,15 @@ package eth import ( "context" + "errors" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) func (b *EthAPIBackend) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error { + if !b.rip7560AcceptPush { + return errors.New("illegal call to eth_sendRip7560TransactionsBundle: Config.Eth.Rip7560AcceptPush is not set") + } return b.eth.txPool.SubmitRip7560Bundle(bundle) } diff --git a/eth/backend.go b/eth/backend.go index 3a1c4eb923f4..92b8ec9e3e8d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -238,8 +238,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { legacyPool := legacypool.New(config.TxPool, eth.blockchain) rip7560PoolConfig := rip7560pool.Config{ - MaxBundleGas: 10000000, - MaxBundleSize: 100, + MaxBundleGas: config.Rip7560MaxBundleGas, + MaxBundleSize: config.Rip7560MaxBundleSize, + PullUrls: config.Rip7560PullUrls, } rip7560 := rip7560pool.New(rip7560PoolConfig, eth.blockchain, config.Miner.Etherbase) @@ -266,7 +267,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { eth.miner = miner.New(eth, config.Miner, eth.engine) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) - 
eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} + eth.APIBackend = &EthAPIBackend{config.Rip7560AcceptPush, stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} if eth.APIBackend.allowUnprotectedTxs { log.Info("Unprotected transactions allowed") } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index f36f212d9c3b..30c1a15df5bd 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -163,6 +163,18 @@ type Config struct { // OverrideVerkle (TODO: remove after the fork) OverrideVerkle *uint64 `toml:",omitempty"` + + // Rip7560MaxBundleGas is the maximum amount of gas that can be used by an RIP-7560 bundle + Rip7560MaxBundleGas *uint64 `toml:",omitempty"` + + // Rip7560MaxBundleSize is the maximum number of transactions an RIP-7560 bundle can contain + Rip7560MaxBundleSize *uint64 `toml:",omitempty"` + + // Rip7560PullUrls provides a list of bundlers the node will ask for new bundles for each block + Rip7560PullUrls []string + + // Rip7560AcceptPush when set to "true" the node will accept incoming 'eth_sendRip7560TransactionsBundle' + Rip7560AcceptPush bool `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. 
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index b8b9eee29423..e76c6f79de3c 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -58,6 +58,10 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCTxFeeCap float64 OverrideCancun *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` + Rip7560MaxBundleGas *uint64 `toml:",omitempty"` + Rip7560MaxBundleSize *uint64 `toml:",omitempty"` + Rip7560PullUrls []string + Rip7560AcceptPush bool `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -101,6 +105,10 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideCancun = c.OverrideCancun enc.OverrideVerkle = c.OverrideVerkle + enc.Rip7560MaxBundleGas = c.Rip7560MaxBundleGas + enc.Rip7560MaxBundleSize = c.Rip7560MaxBundleSize + enc.Rip7560PullUrls = c.Rip7560PullUrls + enc.Rip7560AcceptPush = c.Rip7560AcceptPush return &enc, nil } @@ -148,6 +156,10 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCTxFeeCap *float64 OverrideCancun *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` + Rip7560MaxBundleGas *uint64 `toml:",omitempty"` + Rip7560MaxBundleSize *uint64 `toml:",omitempty"` + Rip7560PullUrls []string + Rip7560AcceptPush *bool `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -276,5 +288,17 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideVerkle != nil { c.OverrideVerkle = dec.OverrideVerkle } + if dec.Rip7560MaxBundleGas != nil { + c.Rip7560MaxBundleGas = dec.Rip7560MaxBundleGas + } + if dec.Rip7560MaxBundleSize != nil { + c.Rip7560MaxBundleSize = dec.Rip7560MaxBundleSize + } + if dec.Rip7560PullUrls != nil { + c.Rip7560PullUrls = dec.Rip7560PullUrls + } + if dec.Rip7560AcceptPush != nil { + c.Rip7560AcceptPush = *dec.Rip7560AcceptPush + } return nil } diff --git a/internal/ethapi/rip7560api.go 
b/internal/ethapi/rip7560api.go index 4e669cc19c5f..4043cfc00c6b 100644 --- a/internal/ethapi/rip7560api.go +++ b/internal/ethapi/rip7560api.go @@ -23,7 +23,7 @@ func (s *TransactionAPI) SendRip7560TransactionsBundle(ctx context.Context, args ValidForBlock: creationBlock, Transactions: txs, } - bundleHash := calculateBundleHash(txs) + bundleHash := CalculateBundleHash(txs) bundle.BundleHash = bundleHash err := SubmitRip7560Bundle(ctx, s.b, bundle) if err != nil { @@ -37,15 +37,19 @@ func (s *TransactionAPI) GetRip7560BundleStatus(ctx context.Context, hash common return bundleStats, err } +// CalculateBundleHash // TODO: If this code is indeed necessary, keep it in utils; better - remove altogether. -func calculateBundleHash(txs []*types.Transaction) common.Hash { +func CalculateBundleHash(txs []*types.Transaction) common.Hash { appendedTxIds := make([]byte, 0) for _, tx := range txs { txHash := tx.Hash() appendedTxIds = append(appendedTxIds, txHash[:]...) } - return rlpHash(appendedTxIds) + bundleHash := rlpHash(appendedTxIds) + println("calculateBundleHash") + println(bundleHash.String()) + return bundleHash } func rlpHash(x interface{}) (h common.Hash) { From a6cb9caa2820f77f26fd2c0d1b70a020db65a285 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Sun, 11 Aug 2024 19:51:04 +0200 Subject: [PATCH 50/73] Fix crash --- internal/ethapi/transaction_args.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 83a70d32489a..393571fce9d3 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -522,16 +522,20 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { Data: args.data(), AccessList: al, // RIP-7560 parameters - Sender: args.Sender, - Signature: *args.Signature, - Paymaster: args.Paymaster, - PaymasterData: *args.PaymasterData, - Deployer: args.Deployer, - DeployerData: *args.DeployerData, - 
BuilderFee: (*big.Int)(args.BuilderFee), - ValidationGasLimit: uint64(*args.ValidationGas), - PaymasterValidationGasLimit: uint64(*args.PaymasterGas), - PostOpGas: uint64(*args.PostOpGas), + Sender: args.Sender, + Signature: *args.Signature, + Paymaster: args.Paymaster, + PaymasterData: *args.PaymasterData, + Deployer: args.Deployer, + DeployerData: *args.DeployerData, + BuilderFee: (*big.Int)(args.BuilderFee), + ValidationGasLimit: uint64(*args.ValidationGas), + } + if args.PaymasterGas != nil { + aatx.PaymasterValidationGasLimit = uint64(*args.PaymasterGas) + } + if args.PostOpGas != nil { + aatx.PostOpGas = uint64(*args.PostOpGas) } data = &aatx hash := types.NewTx(data).Hash() From 4513a3d59cd69e123b7d2494aa69478cadfd413a Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Sun, 11 Aug 2024 21:25:59 +0300 Subject: [PATCH 51/73] getTransaction (#24) --- internal/ethapi/api.go | 67 ++++++++++++++++++++++++++--- internal/ethapi/transaction_args.go | 31 +++++++------ 2 files changed, 79 insertions(+), 19 deletions(-) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index f211dcc6598b..b6181dec8f8a 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1329,7 +1329,7 @@ func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inc type RPCTransaction struct { BlockHash *common.Hash `json:"blockHash"` BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` + From common.Address `json:"from,omitempty"` Gas hexutil.Uint64 `json:"gas"` GasPrice *hexutil.Big `json:"gasPrice"` GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` @@ -1338,17 +1338,43 @@ type RPCTransaction struct { Hash common.Hash `json:"hash"` Input hexutil.Bytes `json:"input"` Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` + To *common.Address `json:"to,omitempty"` TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` Value *hexutil.Big `json:"value"` Type hexutil.Uint64 `json:"type"` Accesses 
*types.AccessList `json:"accessList,omitempty"` ChainID *hexutil.Big `json:"chainId,omitempty"` BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` + V *hexutil.Big `json:"v,omitempty"` + R *hexutil.Big `json:"r,omitempty"` + S *hexutil.Big `json:"s,omitempty"` YParity *hexutil.Uint64 `json:"yParity,omitempty"` + + // Introduced by RIP-7560 Transaction + Sender *common.Address `json:"sender,omitempty"` + Signature *hexutil.Bytes `json:"signature,omitempty"` + Paymaster *common.Address `json:"paymaster,omitempty"` + PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` + Deployer *common.Address `json:"deployer,omitempty"` + DeployerData *hexutil.Bytes `json:"deployerData,omitempty"` + BuilderFee *hexutil.Big `json:"builderFee,omitempty"` + ValidationGas *hexutil.Uint64 `json:"verificationGasLimit,omitempty"` + PaymasterValidationGasLimit *hexutil.Uint64 `json:"paymasterVerificationGasLimit,omitempty"` + PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit,omitempty"` +} + +func toBytes(data []byte) *hexutil.Bytes { + if len(data) == 0 { + return nil + } + return (*hexutil.Bytes)(&data) +} + +func conditional_uint64(v uint64, addr *common.Address) *hexutil.Uint64 { + if addr == nil { + return nil + } + return (*hexutil.Uint64)(&v) } // newRPCTransaction returns a transaction that will serialize to the RPC @@ -1407,6 +1433,37 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.GasPrice = (*hexutil.Big)(tx.GasFeeCap()) } + case types.Rip7560Type: + rip7560Tx := tx.Rip7560TransactionData() + + result.S = nil + result.R = nil + result.V = nil + result.Input = rip7560Tx.Data + result.Sender = rip7560Tx.Sender + result.Signature = toBytes(rip7560Tx.Signature) + result.Gas = hexutil.Uint64(tx.Gas()) + result.Paymaster = rip7560Tx.Paymaster + result.PaymasterData = toBytes(rip7560Tx.PaymasterData) + result.Deployer = 
rip7560Tx.Deployer + result.DeployerData = toBytes(rip7560Tx.DeployerData) + result.BuilderFee = (*hexutil.Big)(rip7560Tx.BuilderFee) + result.ValidationGas = (*hexutil.Uint64)(&rip7560Tx.ValidationGasLimit) + result.PaymasterValidationGasLimit = conditional_uint64(rip7560Tx.PaymasterValidationGasLimit, rip7560Tx.Paymaster) + result.PostOpGas = conditional_uint64(rip7560Tx.PostOpGas, rip7560Tx.Paymaster) + + //shared fields with DynamicFeeTxType + result.ChainID = (*hexutil.Big)(tx.ChainId()) + result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap()) + result.GasTipCap = (*hexutil.Big)(tx.GasTipCap()) + // if the transaction has been mined, compute the effective gas price + if baseFee != nil && blockHash != (common.Hash{}) { + // price = min(gasTipCap + baseFee, gasFeeCap) + result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee)) + } else { + result.GasPrice = (*hexutil.Big)(tx.GasFeeCap()) + } + case types.BlobTxType: al := tx.AccessList() yparity := hexutil.Uint64(v.Sign()) diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 393571fce9d3..bdff65a038a1 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -501,6 +501,13 @@ func (args *TransactionArgs) ToMessage(baseFee *big.Int) *core.Message { } } +func toUint64(b *hexutil.Uint64) uint64 { + if b == nil { + return 0 + } + return uint64(*b) +} + // ToTransaction converts the arguments to a transaction. // This assumes that setDefaults has been called. 
func (args *TransactionArgs) ToTransaction() *types.Transaction { @@ -522,20 +529,16 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { Data: args.data(), AccessList: al, // RIP-7560 parameters - Sender: args.Sender, - Signature: *args.Signature, - Paymaster: args.Paymaster, - PaymasterData: *args.PaymasterData, - Deployer: args.Deployer, - DeployerData: *args.DeployerData, - BuilderFee: (*big.Int)(args.BuilderFee), - ValidationGasLimit: uint64(*args.ValidationGas), - } - if args.PaymasterGas != nil { - aatx.PaymasterValidationGasLimit = uint64(*args.PaymasterGas) - } - if args.PostOpGas != nil { - aatx.PostOpGas = uint64(*args.PostOpGas) + Sender: args.Sender, + Signature: *args.Signature, + Paymaster: args.Paymaster, + PaymasterData: *args.PaymasterData, + Deployer: args.Deployer, + DeployerData: *args.DeployerData, + BuilderFee: (*big.Int)(args.BuilderFee), + ValidationGasLimit: toUint64(args.ValidationGas), + PaymasterValidationGasLimit: toUint64(args.PaymasterGas), + PostOpGas: toUint64(args.PostOpGas), } data = &aatx hash := types.NewTx(data).Hash() From d8ab4ebd194e380c99ba5c4e5ceb84fcc50322c9 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 13 Aug 2024 18:30:24 +0300 Subject: [PATCH 52/73] AA-257: Return useful errors and on-chain revert reasons to the RPC response message (#22) * Propagate validation on-chain revert reasons to the RPC response message * Explicit error for missing both deployed code and factory * Bubble up the real 'RevertReason' through the 'TraceRip7560Validation' API --- core/state_processor_rip7560.go | 54 +++++++++++++++++++----------- eth/tracers/api_tracing_rip7560.go | 51 +++++++++++++++++++++++----- 2 files changed, 77 insertions(+), 28 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index b36979eef2d1..c2f3669e34a6 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -71,6 +71,8 @@ type ValidationPhaseResult struct { 
SenderValidUntil uint64 PmValidAfter uint64 PmValidUntil uint64 + RevertReason []byte + RevertEntityName string } // HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions and return @@ -227,17 +229,23 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte } else { resultDeployer, err = ApplyMessage(evm, deployerMsg, gp) } - if err == nil && resultDeployer != nil { - err = resultDeployer.Err - deploymentUsedGas = resultDeployer.UsedGas - } - if err == nil && statedb.GetCodeSize(*sender) == 0 { - err = errors.New("sender not deployed") - } if err != nil { return nil, fmt.Errorf("account deployment failed: %v", err) } + if resultDeployer.Failed() { + return &ValidationPhaseResult{ + RevertEntityName: "deployer", + RevertReason: resultDeployer.ReturnData, + }, nil + } + if statedb.GetCodeSize(*sender) == 0 { + return nil, fmt.Errorf("account was not deployed by a factory, account:%s factory%s", sender.String(), deployerMsg.To.String()) + } + deploymentUsedGas = resultDeployer.UsedGas } else { + if statedb.GetCodeSize(*sender) == 0 { + return nil, fmt.Errorf("account is not deployed and no factory is specified, account:%s", sender.String()) + } statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) } @@ -249,8 +257,11 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if err != nil { return nil, err } - if resultAccountValidation.Err != nil { - return nil, resultAccountValidation.Err + if resultAccountValidation.Failed() { + return &ValidationPhaseResult{ + RevertEntityName: "account", + RevertReason: resultAccountValidation.ReturnData, + }, nil } validAfter, validUntil, err := validateAccountReturnData(resultAccountValidation.ReturnData) if err != nil { @@ -261,10 +272,16 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte return nil, err } - paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := 
applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + paymasterContext, paymasterRevertReason, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) if err != nil { return nil, err } + if paymasterRevertReason != nil { + return &ValidationPhaseResult{ + RevertEntityName: "paymaster", + RevertReason: paymasterRevertReason, + }, nil + } vpr := &ValidationPhaseResult{ Tx: tx, @@ -285,7 +302,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte return vpr, nil } -func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, uint64, uint64, uint64, error) { +func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, []byte, uint64, uint64, uint64, error) { /*** Paymaster Validation Frame ***/ var pmValidationUsedGas uint64 var paymasterContext []byte @@ -293,30 +310,27 @@ func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.Ch var pmValidUntil uint64 paymasterMsg, err := preparePaymasterValidationMessage(tx, chainConfig, signingHash) if err != nil { - return nil, 0, 0, 0, err + return nil, nil, 0, 0, 0, err } if paymasterMsg != nil { resultPm, err := ApplyMessage(evm, paymasterMsg, gp) if err != nil { - return nil, 0, 0, 0, err - } - if resultPm.Failed() { - return nil, 0, 0, 0, resultPm.Err + return nil, nil, 0, 0, 0, err } if resultPm.Failed() { - return nil, 0, 0, 0, errors.New("paymaster validation failed - invalid transaction") + return nil, resultPm.ReturnData, 0, 0, 0, nil } pmValidationUsedGas = resultPm.UsedGas paymasterContext, pmValidAfter, pmValidUntil, err = validatePaymasterReturnData(resultPm.ReturnData) if err 
!= nil { - return nil, 0, 0, 0, err + return nil, nil, 0, 0, 0, err } err = validateValidityTimeRange(header.Time, pmValidAfter, pmValidUntil) if err != nil { - return nil, 0, 0, 0, err + return nil, nil, 0, 0, 0, err } } - return paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, nil + return paymasterContext, nil, pmValidationUsedGas, pmValidAfter, pmValidUntil, nil } func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) (*ExecutionResult, error) { diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go index 315e825ec3dd..88da7889e631 100644 --- a/eth/tracers/api_tracing_rip7560.go +++ b/eth/tracers/api_tracing_rip7560.go @@ -4,7 +4,9 @@ import ( "context" "errors" "fmt" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -15,6 +17,35 @@ import ( "time" ) +// note: revertError code is copied here from the 'ethapi' package + +// revertError is an API error that encompasses an EVM revert with JSON error +// code and a binary data blob. +type validationRevertError struct { + error + reason string // revert reason hex encoded +} + +func (v *validationRevertError) ErrorData() interface{} { + return v.reason +} + +// newValidationRevertError creates a revertError instance with the provided revert data. +func newValidationRevertError(vpr *core.ValidationPhaseResult) *validationRevertError { + errorMessage := fmt.Sprintf("validation phase reverted in contract %s", vpr.RevertEntityName) + // TODO: use "vm.ErrorX" for RIP-7560 specific errors as well! 
+ err := errors.New(errorMessage) + + reason, errUnpack := abi.UnpackRevert(vpr.RevertReason) + if errUnpack == nil { + err = fmt.Errorf("%w: %v", err, reason) + } + return &validationRevertError{ + error: err, + reason: hexutil.Encode(vpr.RevertReason), + } +} + // Rip7560API is the collection of tracing APIs exposed over the private debugging endpoint. type Rip7560API struct { backend Backend @@ -55,10 +86,13 @@ func (api *Rip7560API) TraceRip7560Validation( if config != nil { traceConfig = &config.TraceConfig } - traceResult, err := api.traceTx(ctx, tx, new(Context), block, vmctx, statedb, traceConfig) + traceResult, vpr, err := api.traceTx(ctx, tx, new(Context), block, vmctx, statedb, traceConfig) if err != nil { return nil, err } + if vpr != nil && vpr.RevertReason != nil { + return nil, newValidationRevertError(vpr) + } return traceResult, err } @@ -83,7 +117,7 @@ func (api *Rip7560API) chainContext(ctx context.Context) core.ChainContext { return ethapi.NewChainContext(ctx, api.backend) } -func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx *Context, block *types.Block, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx *Context, block *types.Block, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, *core.ValidationPhaseResult, error) { var ( tracer *Tracer err error @@ -113,7 +147,7 @@ func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx // Define a meaningful timeout of a single transaction trace if config.Timeout != nil { if timeout, err = time.ParseDuration(*config.Timeout); err != nil { - return nil, err + return nil, nil, err } } deadlineCtx, cancel := context.WithTimeout(ctx, timeout) @@ -133,13 +167,14 @@ func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx // TODO: this is added to allow our bundler checking the 
'TraceValidation' API is supported on Geth if tx.Rip7560TransactionData().Sender.Cmp(common.HexToAddress("0x0000000000000000000000000000000000000000")) == 0 { - return tracer.GetResult() + result, err := tracer.GetResult() + return result, nil, err } - _, err = core.ApplyRip7560ValidationPhases(api.backend.ChainConfig(), api.chainContext(ctx), nil, gp, statedb, block.Header(), tx, vmenv.Config) - //_, err = core.ApplyTransactionWithEVM(message, api.backend.ChainConfig(), new(core.GasPool).AddGas(message.GasLimit), statedb, vmctx.BlockNumber, txctx.BlockHash, tx, &usedGas, vmenv) + vpr, err := core.ApplyRip7560ValidationPhases(api.backend.ChainConfig(), api.chainContext(ctx), nil, gp, statedb, block.Header(), tx, vmenv.Config) if err != nil { - return nil, fmt.Errorf("tracing failed: %w", err) + return nil, nil, fmt.Errorf("tracing failed: %w", err) } - return tracer.GetResult() + result, err := tracer.GetResult() + return result, vpr, err } From 9a1044ec95aa8a5bcddf3ba43c7226f5a82fcc39 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 13 Aug 2024 19:47:55 +0300 Subject: [PATCH 53/73] AA-411: Parse calls to EntryPoint address for account/paymaster 'acceptance' (#23) * AA-411: Parse calls to EntryPoint address for account/paymaster 'acceptance' * Extract ABI encoding&decoding into file; use ABI instead of manual bit parsing * Remove rudimentary mentions of "magic" in RIP-7560 related code * Copy the "evm.Config.Tracer" instead of overriding the original object * Remove depth check, compare addresses instead --- core/rip7560_abi.go | 83 +++++++++++ core/rip7560_abi_constants.go | 75 ++++++++++ core/state_processor_rip7560.go | 237 ++++++++++++++---------------- params/config.go | 4 - tests/rip7560/paymaster_test.go | 12 +- tests/rip7560/rip7560TestUtils.go | 13 +- tests/rip7560/validation_test.go | 16 +- 7 files changed, 284 insertions(+), 156 deletions(-) create mode 100644 core/rip7560_abi.go create mode 100644 core/rip7560_abi_constants.go diff --git 
a/core/rip7560_abi.go b/core/rip7560_abi.go new file mode 100644 index 000000000000..cc4aaf02f1d1 --- /dev/null +++ b/core/rip7560_abi.go @@ -0,0 +1,83 @@ +package core + +import ( + "errors" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "math/big" + "strings" +) + +type AcceptAccountData struct { + ValidAfter *big.Int + ValidUntil *big.Int +} + +type AcceptPaymasterData struct { + ValidAfter *big.Int + ValidUntil *big.Int + Context []byte +} + +func abiEncodeValidateTransaction(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { + jsonAbi, err := abi.JSON(strings.NewReader(ValidateTransactionAbi)) + if err != nil { + return nil, err + } + + txAbiEncoding, err := tx.AbiEncode() + validateTransactionData, err := jsonAbi.Pack("validateTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) + return validateTransactionData, err +} + +func abiEncodeValidatePaymasterTransaction(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { + jsonAbi, err := abi.JSON(strings.NewReader(ValidatePaymasterTransactionAbi)) + txAbiEncoding, err := tx.AbiEncode() + data, err := jsonAbi.Pack("validatePaymasterTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) + return data, err +} + +func abiEncodePostPaymasterTransaction(context []byte) ([]byte, error) { + jsonAbi, err := abi.JSON(strings.NewReader(PostPaymasterTransactionAbi)) + if err != nil { + return nil, err + } + // TODO: pass actual gas cost parameter here! 
+ postOpData, err := jsonAbi.Pack("postPaymasterTransaction", true, big.NewInt(0), context) + return postOpData, err +} + +func abiDecodeAcceptAccount(input []byte) (*AcceptAccountData, error) { + jsonAbi, err := abi.JSON(strings.NewReader(AcceptAccountAbi)) + if err != nil { + return nil, err + } + methodSelector := new(big.Int).SetBytes(input[:4]).Uint64() + if methodSelector != AcceptAccountMethodSig { + if methodSelector == SigFailAccountMethodSig { + return nil, errors.New("account signature error") + } + return nil, errors.New("account validation did call the EntryPoint but not the 'acceptAccount' callback") + } + acceptAccountData := &AcceptAccountData{} + err = jsonAbi.UnpackIntoInterface(acceptAccountData, "acceptAccount", input[4:]) + return acceptAccountData, err +} + +func abiDecodeAcceptPaymaster(input []byte) (*AcceptPaymasterData, error) { + jsonAbi, err := abi.JSON(strings.NewReader(AcceptPaymasterAbi)) + if err != nil { + return nil, err + } + methodSelector := new(big.Int).SetBytes(input[:4]).Uint64() + if methodSelector != AcceptPaymasterMethodSig { + return nil, errors.New("paymaster validation did call the EntryPoint but not the 'acceptPaymaster' callback") + } + acceptPaymasterData := &AcceptPaymasterData{} + err = jsonAbi.UnpackIntoInterface(acceptPaymasterData, "acceptPaymaster", input[4:]) + if len(acceptPaymasterData.Context) > PaymasterMaxContextSize { + return nil, errors.New("paymaster return data: context too large") + } + return acceptPaymasterData, err +} diff --git a/core/rip7560_abi_constants.go b/core/rip7560_abi_constants.go new file mode 100644 index 000000000000..da7ff2838779 --- /dev/null +++ b/core/rip7560_abi_constants.go @@ -0,0 +1,75 @@ +package core + +const AcceptAccountMethodSig = uint64(0x1256ebd1) // acceptAccount(uint256,uint256) +const AcceptPaymasterMethodSig = uint64(0x03be8439) // acceptPaymaster(uint256,uint256,bytes) +const SigFailAccountMethodSig = uint64(0x7715fac2) // sigFailAccount(uint256,uint256) +const 
PaymasterMaxContextSize = 65536 +const Rip7560AbiVersion = 0 + +const ValidateTransactionAbi = ` +[ + { + "type":"function", + "name":"validateTransaction", + "inputs": [ + {"name": "version","type": "uint256"}, + {"name": "txHash","type": "bytes32"}, + {"name": "transaction","type": "bytes"} + ] + } +]` + +const ValidatePaymasterTransactionAbi = ` +[ + { + "type":"function", + "name":"validatePaymasterTransaction", + "inputs": [ + {"name": "version","type": "uint256"}, + {"name": "txHash","type": "bytes32"}, + {"name": "transaction","type": "bytes"} + ] + } +]` + +const PostPaymasterTransactionAbi = ` +[ + { + "type":"function", + "name":"postPaymasterTransaction", + "inputs": [ + {"name": "success","type": "bool"}, + {"name": "actualGasCost","type": "uint256"}, + {"name": "context","type": "bytes"} + ] + } +]` + +// AcceptAccountAbi Note that this is not a true ABI of the "acceptAccount" function. +// This ABI swaps inputs and outputs to simplify the ABI decoding. +const AcceptAccountAbi = ` +[ + { + "type":"function", + "name":"acceptAccount", + "outputs": [ + {"name": "validAfter","type": "uint256"}, + {"name": "validUntil","type": "uint256"} + ] + } +]` + +// AcceptPaymasterAbi Note that this is not a true ABI of the "acceptPaymaster" function. +// This ABI swaps inputs and outputs to simplify the ABI decoding. 
+const AcceptPaymasterAbi = ` +[ + { + "type":"function", + "name":"acceptPaymaster", + "outputs": [ + {"name": "validAfter","type": "uint256"}, + {"name": "validUntil","type": "uint256"}, + {"name": "context","type": "bytes"} + ] + } +]` diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index c2f3669e34a6..efb2b22352e4 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -3,7 +3,6 @@ package core import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" @@ -12,49 +11,16 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" "math/big" - "strings" ) -const MAGIC_VALUE_SENDER = uint64(0xbf45c166) -const MAGIC_VALUE_PAYMASTER = uint64(0xe0e6183a) -const MAGIC_VALUE_SIGFAIL = uint64(0x31665494) -const PAYMASTER_MAX_CONTEXT_SIZE = 65536 - var AA_ENTRY_POINT = common.HexToAddress("0x0000000000000000000000000000000000007560") var AA_SENDER_CREATOR = common.HexToAddress("0x00000000000000000000000000000000ffff7560") -func PackValidationData(authorizerMagic uint64, validUntil, validAfter uint64) []byte { - - t := new(big.Int).SetUint64(uint64(validAfter)) - t = t.Lsh(t, 48).Add(t, new(big.Int).SetUint64(validUntil&0xffffff)) - t = t.Lsh(t, 160).Add(t, new(big.Int).SetUint64(uint64(authorizerMagic))) - return common.LeftPadBytes(t.Bytes(), 32) -} - -func UnpackValidationData(validationData []byte) (authorizerMagic uint64, validUntil, validAfter uint64) { - - authorizerMagic = new(big.Int).SetBytes(validationData[:20]).Uint64() - validUntil = new(big.Int).SetBytes(validationData[20:26]).Uint64() - validAfter = new(big.Int).SetBytes(validationData[26:32]).Uint64() - return -} - -func UnpackPaymasterValidationReturn(paymasterValidationReturn []byte) (validationData, context []byte, err error) { - if len(paymasterValidationReturn) < 96 { - return 
nil, nil, errors.New("paymaster return data: too short") - } - validationData = paymasterValidationReturn[0:32] - //2nd bytes32 is ignored (its an offset value) - contextLen := new(big.Int).SetBytes(paymasterValidationReturn[64:96]) - if uint64(len(paymasterValidationReturn)) < 96+contextLen.Uint64() { - return nil, nil, errors.New("paymaster return data: unable to decode context") - } - if contextLen.Cmp(big.NewInt(PAYMASTER_MAX_CONTEXT_SIZE)) > 0 { - return nil, nil, errors.New("paymaster return data: context too large") - } - - context = paymasterValidationReturn[96 : 96+contextLen.Uint64()] - return +type EntryPointCall struct { + OnEnterSuper tracing.EnterHook + Input []byte + From common.Address + err error } type ValidationPhaseResult struct { @@ -213,8 +179,21 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte GasPrice: gasPrice, } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) + epc := &EntryPointCall{} + + if evm.Config.Tracer == nil { + evm.Config.Tracer = &tracing.Hooks{ + OnEnter: epc.OnEnter, + } + } else { + // keep the original tracer's OnEnter hook + epc.OnEnterSuper = evm.Config.Tracer.OnEnter + newTracer := *evm.Config.Tracer + newTracer.OnEnter = epc.OnEnter + evm.Config.Tracer = &newTracer + } - if evm.Config.Tracer != nil && evm.Config.Tracer.OnTxStart != nil { + if evm.Config.Tracer.OnTxStart != nil { evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, common.Address{}) } @@ -263,16 +242,23 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte RevertReason: resultAccountValidation.ReturnData, }, nil } - validAfter, validUntil, err := validateAccountReturnData(resultAccountValidation.ReturnData) + aad, err := validateAccountEntryPointCall(epc, aatx.Sender) if err != nil { return nil, err } - err = validateValidityTimeRange(header.Time, validAfter, validUntil) + + // clear the EntryPoint calls array after parsing + epc.err = nil + epc.Input = nil + epc.From = 
common.Address{} + + err = validateValidityTimeRange(header.Time, aad.ValidAfter.Uint64(), aad.ValidUntil.Uint64()) if err != nil { return nil, err } - paymasterContext, paymasterRevertReason, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(tx, chainConfig, signingHash, evm, gp, statedb, header) + vpr := &ValidationPhaseResult{} + paymasterContext, paymasterRevertReason, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(epc, tx, chainConfig, signingHash, evm, gp, statedb, header) if err != nil { return nil, err } @@ -283,54 +269,48 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte }, nil } - vpr := &ValidationPhaseResult{ - Tx: tx, - TxHash: tx.Hash(), - PreCharge: preCharge, - EffectiveGasPrice: gasPriceUint256, - PaymasterContext: paymasterContext, - DeploymentUsedGas: deploymentUsedGas, - ValidationUsedGas: resultAccountValidation.UsedGas, - PmValidationUsedGas: pmValidationUsedGas, - SenderValidAfter: validAfter, - SenderValidUntil: validUntil, - PmValidAfter: pmValidAfter, - PmValidUntil: pmValidUntil, - } + vpr.Tx = tx + vpr.TxHash = tx.Hash() + vpr.PreCharge = preCharge + vpr.EffectiveGasPrice = gasPriceUint256 + vpr.PaymasterContext = paymasterContext + vpr.DeploymentUsedGas = deploymentUsedGas + vpr.ValidationUsedGas = resultAccountValidation.UsedGas + vpr.PmValidationUsedGas = pmValidationUsedGas + vpr.SenderValidAfter = aad.ValidAfter.Uint64() + vpr.SenderValidUntil = aad.ValidUntil.Uint64() + vpr.PmValidAfter = pmValidAfter + vpr.PmValidUntil = pmValidUntil statedb.Finalise(true) return vpr, nil } -func applyPaymasterValidationFrame(tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, []byte, uint64, uint64, uint64, error) { +func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, chainConfig *params.ChainConfig, 
signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, []byte, uint64, uint64, uint64, error) { /*** Paymaster Validation Frame ***/ + aatx := tx.Rip7560TransactionData() var pmValidationUsedGas uint64 - var paymasterContext []byte - var pmValidAfter uint64 - var pmValidUntil uint64 paymasterMsg, err := preparePaymasterValidationMessage(tx, chainConfig, signingHash) + if paymasterMsg == nil || err != nil { + return nil, nil, 0, 0, 0, err + } + resultPm, err := ApplyMessage(evm, paymasterMsg, gp) if err != nil { return nil, nil, 0, 0, 0, err } - if paymasterMsg != nil { - resultPm, err := ApplyMessage(evm, paymasterMsg, gp) - if err != nil { - return nil, nil, 0, 0, 0, err - } - if resultPm.Failed() { - return nil, resultPm.ReturnData, 0, 0, 0, nil - } - pmValidationUsedGas = resultPm.UsedGas - paymasterContext, pmValidAfter, pmValidUntil, err = validatePaymasterReturnData(resultPm.ReturnData) - if err != nil { - return nil, nil, 0, 0, 0, err - } - err = validateValidityTimeRange(header.Time, pmValidAfter, pmValidUntil) - if err != nil { - return nil, nil, 0, 0, 0, err - } + if resultPm.Failed() { + return nil, resultPm.ReturnData, 0, 0, 0, nil } - return paymasterContext, nil, pmValidationUsedGas, pmValidAfter, pmValidUntil, nil + pmValidationUsedGas = resultPm.UsedGas + apd, err := validatePaymasterEntryPointCall(epc, aatx.Paymaster) + if err != nil { + return nil, nil, 0, 0, 0, err + } + err = validateValidityTimeRange(header.Time, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64()) + if err != nil { + return nil, nil, 0, 0, 0, err + } + return apd.Context, nil, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) (*ExecutionResult, error) { @@ -421,16 +401,10 @@ func prepareDeployerMessage(baseTx *types.Transaction, config 
*params.ChainConfi func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, deploymentUsedGas uint64) (*Message, error) { tx := baseTx.Rip7560TransactionData() - jsondata := `[ - {"type":"function","name":"validateTransaction","inputs": [{"name": "version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} - ]` - - validateTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) + data, err := abiEncodeValidateTransaction(tx, signingHash) if err != nil { return nil, err } - txAbiEncoding, err := tx.AbiEncode() - validateTransactionData, err := validateTransactionAbi.Pack("validateTransaction", big.NewInt(0), signingHash, txAbiEncoding) return &Message{ From: AA_ENTRY_POINT, To: tx.Sender, @@ -439,7 +413,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, - Data: validateTransactionData, + Data: data, AccessList: make(types.AccessList, 0), SkipAccountChecks: true, IsRip7560Frame: true, @@ -451,19 +425,12 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params if tx.Paymaster == nil || tx.Paymaster.Cmp(common.Address{}) == 0 { return nil, nil } - jsondata := `[ - {"type":"function","name":"validatePaymasterTransaction","inputs": [{"name": "version","type": "uint256"},{"name": "txHash","type": "bytes32"},{"name": "transaction","type": "bytes"}]} - ]` - - validateTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) - txAbiEncoding, err := tx.AbiEncode() - data, err := validateTransactionAbi.Pack("validatePaymasterTransaction", big.NewInt(0), signingHash, txAbiEncoding) - + data, err := abiEncodeValidatePaymasterTransaction(tx, signingHash) if err != nil { return nil, err } return &Message{ - From: config.EntryPointAddress, + From: AA_ENTRY_POINT, To: tx.Paymaster, Value: big.NewInt(0), GasLimit: 
tx.PaymasterValidationGasLimit, @@ -498,16 +465,8 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC if len(vpr.PaymasterContext) == 0 { return nil, nil } - tx := vpr.Tx.Rip7560TransactionData() - jsondata := `[ - {"type":"function","name":"postPaymasterTransaction","inputs": [{"name": "success","type": "bool"},{"name": "actualGasCost","type": "uint256"},{"name": "context","type": "bytes"}]} - ]` - postPaymasterTransactionAbi, err := abi.JSON(strings.NewReader(jsondata)) - if err != nil { - return nil, err - } - postOpData, err := postPaymasterTransactionAbi.Pack("postPaymasterTransaction", true, big.NewInt(0), vpr.PaymasterContext) + postOpData, err := abiEncodePostPaymasterTransaction(vpr.PaymasterContext) if err != nil { return nil, err } @@ -526,34 +485,41 @@ func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainC }, nil } -func validateAccountReturnData(data []byte) (uint64, uint64, error) { - if len(data) != 32 { - return 0, 0, errors.New("invalid account return data length") +func validateAccountEntryPointCall(epc *EntryPointCall, sender *common.Address) (*AcceptAccountData, error) { + if epc.err != nil { + return nil, epc.err } - magicExpected, validUntil, validAfter := UnpackValidationData(data) - //todo: we check first 8 bytes of the 20-byte address (the rest is expected to be zeros) - if magicExpected != MAGIC_VALUE_SENDER { - if magicExpected == MAGIC_VALUE_SIGFAIL { - return 0, 0, errors.New("account signature error") - } - return 0, 0, errors.New("account did not return correct MAGIC_VALUE") + if epc.Input == nil { + return nil, errors.New("account validation did not call the EntryPoint 'acceptAccount' callback") + } + if len(epc.Input) != 68 { + return nil, errors.New("invalid account return data length") } - return validAfter, validUntil, nil + if epc.From.Cmp(*sender) != 0 { + return nil, errors.New("invalid call to EntryPoint contract from a wrong account address") + } + return 
abiDecodeAcceptAccount(epc.Input) } -func validatePaymasterReturnData(data []byte) (context []byte, validAfter, validUntil uint64, error error) { - if len(data) < 32 { - return nil, 0, 0, errors.New("invalid paymaster return data length") +func validatePaymasterEntryPointCall(epc *EntryPointCall, paymaster *common.Address) (*AcceptPaymasterData, error) { + if epc.err != nil { + return nil, epc.err } - validationData, context, err := UnpackPaymasterValidationReturn(data) - if err != nil { - return nil, 0, 0, err + if epc.Input == nil { + return nil, errors.New("paymaster validation did not call the EntryPoint 'acceptPaymaster' callback") + } + + if len(epc.Input) < 100 { + return nil, errors.New("invalid paymaster callback data length") } - magicExpected, validUntil, validAfter := UnpackValidationData(validationData) - if magicExpected != MAGIC_VALUE_PAYMASTER { - return nil, 0, 0, errors.New("paymaster did not return correct MAGIC_VALUE") + if epc.From.Cmp(*paymaster) != 0 { + return nil, errors.New("invalid call to EntryPoint contract from a wrong paymaster address") + } + apd, err := abiDecodeAcceptPaymaster(epc.Input) + if err != nil { + return nil, err } - return context, validAfter, validUntil, nil + return apd, nil } func validateValidityTimeRange(time uint64, validAfter uint64, validUntil uint64) error { @@ -571,3 +537,22 @@ func validateValidityTimeRange(time uint64, validAfter uint64, validUntil uint64 } return nil } + +func (epc *EntryPointCall) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if epc.OnEnterSuper != nil { + epc.OnEnterSuper(depth, typ, from, to, input, gas, value) + } + isRip7560EntryPoint := to.Cmp(AA_ENTRY_POINT) == 0 + if !isRip7560EntryPoint { + return + } + + if epc.Input != nil { + epc.err = errors.New("illegal repeated call to the EntryPoint callback") + return + } + + epc.Input = make([]byte, len(input)) + copy(epc.Input, input) + epc.From = from +} diff --git 
a/params/config.go b/params/config.go index 130a065e3791..0c783329504f 100644 --- a/params/config.go +++ b/params/config.go @@ -371,10 +371,6 @@ type ChainConfig struct { // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` - - // RIP-7560 specific config parameters - EntryPointAddress common.Address `json:"entryPointAddress,omitempty"` - DeployerCallerAddress common.Address `json:"deployerCallerAddress,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. diff --git a/tests/rip7560/paymaster_test.go b/tests/rip7560/paymaster_test.go index 9e6422624552..2317507f63f7 100644 --- a/tests/rip7560/paymaster_test.go +++ b/tests/rip7560/paymaster_test.go @@ -70,10 +70,10 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { // then we return the total length of above (context itself is uninitialized string of max+1 zeroes) pmCode := createCode( //vm.PUSH1, 1, vm.PUSH0, vm.RETURN, - copyToMemory(core.PackValidationData(core.MAGIC_VALUE_PAYMASTER, 0, 0), 0), + copyToMemory(core.PackValidationData(core.AcceptPaymasterMethodSig, 0, 0), 0), copyToMemory(asBytes32(64), 32), - copyToMemory(asBytes32(core.PAYMASTER_MAX_CONTEXT_SIZE+1), 64), - push(core.PAYMASTER_MAX_CONTEXT_SIZE+96+1), vm.PUSH0, vm.RETURN) + copyToMemory(asBytes32(core.PaymasterMaxContextSize+1), 64), + push(core.PaymasterMaxContextSize+96+1), vm.PUSH0, vm.RETURN) handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). withCode(DEFAULT_PAYMASTER.String(), pmCode, DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ @@ -86,7 +86,7 @@ func TestPaymasterValidationFailure_contextTooLarge(t *testing.T) { func TestPaymasterValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
- withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 300, 200, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.AcceptPaymasterMethodSig, 300, 200, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: 1000000000, PaymasterValidationGasLimit: 1000000000, GasFeeCap: big.NewInt(1000000000), @@ -96,7 +96,7 @@ func TestPaymasterValidationFailure_validAfter(t *testing.T) { func TestPaymasterValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). - withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 1, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.AcceptPaymasterMethodSig, 1, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: 1000000000, PaymasterValidationGasLimit: 1000000000, GasFeeCap: big.NewInt(1000000000), @@ -106,7 +106,7 @@ func TestPaymasterValidationFailure_validUntil(t *testing.T) { func TestPaymasterValidation_ok(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, createAccountCode(), 0). 
- withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.MAGIC_VALUE_PAYMASTER, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + withCode(DEFAULT_PAYMASTER.String(), returnWithData(paymasterReturnValue(core.AcceptPaymasterMethodSig, 0, 0, []byte{})), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: 1000000000, PaymasterValidationGasLimit: 1000000000, GasFeeCap: big.NewInt(1000000000), diff --git a/tests/rip7560/rip7560TestUtils.go b/tests/rip7560/rip7560TestUtils.go index 5ebf610d002b..ddf4c55d9c6e 100644 --- a/tests/rip7560/rip7560TestUtils.go +++ b/tests/rip7560/rip7560TestUtils.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/status-im/keycard-go/hexutils" "math/big" - "slices" "testing" ) @@ -158,7 +157,7 @@ func returnWithData(data []byte) []byte { } func createAccountCode() []byte { - return returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 0, 0)) + return nil } // create EVM code from OpCode, byte and []bytes @@ -202,13 +201,3 @@ func createCode(items ...interface{}) []byte { func asBytes32(a int) []byte { return common.LeftPadBytes(big.NewInt(int64(a)).Bytes(), 32) } - -func paymasterReturnValue(magic, validUntil, validAfter uint64, context []byte) []byte { - validationData := core.PackValidationData(magic, validUntil, validAfter) - //manual encode (bytes32 validationData, bytes context) - return slices.Concat( - common.LeftPadBytes(validationData, 32), - asBytes32(64), - asBytes32(len(context)), - context) -} diff --git a/tests/rip7560/validation_test.go b/tests/rip7560/validation_test.go index 2c5bd24321b3..d39e5ba79cd4 100644 --- a/tests/rip7560/validation_test.go +++ b/tests/rip7560/validation_test.go @@ -21,11 +21,11 @@ func TestPackValidationData(t *testing.T) { assert.Equal(t, packed.Text(16), new(big.Int).SetBytes(core.PackValidationData(0x1234, 1, 2)).Text(16)) } -func TestUnpackValidationData(t *testing.T) { - packed := 
core.PackValidationData(0xdead, 0xcafe, 0xface) - magic, until, after := core.UnpackValidationData(packed) - assert.Equal(t, []uint64{0xdead, 0xcafe, 0xface}, []uint64{magic, until, after}) -} +// func TestUnpackValidationData(t *testing.T) { +// packed := core.PackValidationData(0xdead, 0xcafe, 0xface) +// magic, until, after := core.UnpackValidationData(packed) +// assert.Equal(t, []uint64{0xdead, 0xcafe, 0xface}, []uint64{magic, until, after}) +// } func TestValidationFailure_OOG(t *testing.T) { @@ -44,7 +44,7 @@ func TestValidationFailure_no_balance(t *testing.T) { } func TestValidationFailure_sigerror(t *testing.T) { - handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.MAGIC_VALUE_SIGFAIL, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, returnWithData(core.PackValidationData(core.SigFailAccountMethodSig, 0, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "account signature error") @@ -53,7 +53,7 @@ func TestValidationFailure_sigerror(t *testing.T) { func TestValidationFailure_validAfter(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.AcceptAccountMethodSig, 300, 200)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity not reached yet") @@ -62,7 +62,7 @@ func TestValidationFailure_validAfter(t *testing.T) { func TestValidationFailure_validUntil(t *testing.T) { handleTransaction(newTestContextBuilder(t).withCode(DEFAULT_SENDER, - returnWithData(core.PackValidationData(core.MAGIC_VALUE_SENDER, 1, 0)), 
DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ + returnWithData(core.PackValidationData(core.AcceptAccountMethodSig, 1, 0)), DEFAULT_BALANCE), types.Rip7560AccountAbstractionTx{ ValidationGasLimit: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), }, "RIP-7560 transaction validity expired") From 9cc74b6036c56ff3aa5ead5cfc567219a05abd43 Mon Sep 17 00:00:00 2001 From: shahafn Date: Mon, 19 Aug 2024 14:40:53 +0300 Subject: [PATCH 54/73] AA-336 Revert execution when postOp reverts (#29) * Revert execution when postOp reverts * Fixing execution, postOp * Adding statedb access list functions to be able to snapshot/revert --- core/state/statedb.go | 8 ++++++++ core/state_processor_rip7560.go | 27 ++++++++++++++++++--------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 61e76cdd7788..f69ddc301a2f 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -723,6 +723,14 @@ func (s *StateDB) Copy() *StateDB { return state } +func (s *StateDB) AccessListCopy() *accessList { + return s.accessList.Copy() +} + +func (s *StateDB) SetAccessList(al *accessList) { + s.accessList = al +} + // Snapshot returns an identifier for the current revision of the state. 
func (s *StateDB) Snapshot() int { id := s.nextRevisionId diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index efb2b22352e4..0381da0c37a6 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -313,7 +313,7 @@ func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, c return apd.Context, nil, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } -func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) (*ExecutionResult, error) { +func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool) (*ExecutionResult, error) { var paymasterPostOpResult *ExecutionResult paymasterPostOpMsg, err := preparePostOpMessage(vpr, evm.ChainConfig(), executionResult) if err != nil { @@ -323,14 +323,12 @@ func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *Exec if err != nil { return nil, err } - // TODO: revert the execution phase changes return paymasterPostOpResult, nil } func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhaseResult, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, cfg vm.Config) (*types.Receipt, error) { - // TODO: snapshot EVM - we will revert back here if postOp fails - + beforeExecSnapshotId := statedb.Snapshot() blockContext := NewEVMBlockContext(header, bc, author) message, err := TransactionToMessage(vpr.Tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) txContext := NewEVMTxContext(message) @@ -339,13 +337,24 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) executionResult, err := ApplyMessage(evm, accountExecutionMsg, gp) + executionAL := 
statedb.AccessListCopy() if err != nil { return nil, err } + beforePostSnapshotId := statedb.Snapshot() var paymasterPostOpResult *ExecutionResult if len(vpr.PaymasterContext) != 0 { - paymasterPostOpResult, err = applyPaymasterPostOpFrame(vpr, executionResult, evm, gp, statedb, header) + paymasterPostOpResult, err = applyPaymasterPostOpFrame(vpr, executionResult, evm, gp) + } + + // PostOp failed, reverting execution changes + if paymasterPostOpResult != nil && paymasterPostOpResult.Err != nil { + statedb.RevertToSnapshot(beforePostSnapshotId) + // Workaround a bug in snapshot/revert - can't be called after multiple ApplyMessage() calls + statedb.SetAccessList(executionAL) + statedb.RevertToSnapshot(beforeExecSnapshotId) } + if err != nil { return nil, err } @@ -393,7 +402,7 @@ func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfi GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, Data: tx.DeployerData, - AccessList: make(types.AccessList, 0), + AccessList: nil, SkipAccountChecks: true, IsRip7560Frame: true, } @@ -414,7 +423,7 @@ func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *par GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, Data: data, - AccessList: make(types.AccessList, 0), + AccessList: nil, SkipAccountChecks: true, IsRip7560Frame: true, }, nil @@ -438,7 +447,7 @@ func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, Data: data, - AccessList: make(types.AccessList, 0), + AccessList: nil, SkipAccountChecks: true, IsRip7560Frame: true, }, nil @@ -455,7 +464,7 @@ func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.Ch GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, Data: tx.Data, - AccessList: make(types.AccessList, 0), + AccessList: nil, SkipAccountChecks: true, IsRip7560Frame: true, } From de1af9955f8ba1d8c5680c070d9221f277fdee0c Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 19 Aug 2024 
16:28:25 +0200 Subject: [PATCH 55/73] AA-412: Implement RIP-7712 NonceManager support (#27) * Add RIP-7712 nonce field to the RIP-7560 transaction type * Do not increment legacy nonce for RIP-7712 transactions * Handle on-chain NonceManager revert --- core/rip7712_nonce.go | 40 +++++++++++++++++++++++++++++ core/state_processor_rip7560.go | 33 +++++++++++++++++++++--- core/types/tx_rip7560.go | 25 ++++++++++++++---- internal/ethapi/api.go | 4 +++ internal/ethapi/transaction_args.go | 4 +++ params/config.go | 7 +++++ 6 files changed, 105 insertions(+), 8 deletions(-) create mode 100644 core/rip7712_nonce.go diff --git a/core/rip7712_nonce.go b/core/rip7712_nonce.go new file mode 100644 index 000000000000..e77388ed2325 --- /dev/null +++ b/core/rip7712_nonce.go @@ -0,0 +1,40 @@ +package core + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" + "math/big" +) + +// TODO: accept address as configuration parameter +var AA_NONCE_MANAGER = common.HexToAddress("0x63f63e798f5F6A934Acf0a3FD1C01f3Fac851fF0") + +func prepareNonceManagerMessage(baseTx *types.Transaction) *Message { + // TODO: this can probably be done a lot easier, check syntax + tx := baseTx.Rip7560TransactionData() + nonceKey := make([]byte, 24) + nonce := make([]byte, 8) + nonceKey256, _ := uint256.FromBig(tx.NonceKey) + nonce256 := uint256.NewInt(tx.Nonce) + nonceKey256.WriteToSlice(nonceKey) + nonce256.WriteToSlice(nonce) + + nonceManagerData := make([]byte, 0) + nonceManagerData = append(nonceManagerData[:], tx.Sender.Bytes()...) + nonceManagerData = append(nonceManagerData[:], nonceKey...) + nonceManagerData = append(nonceManagerData[:], nonce...) 
+ return &Message{ + From: AA_ENTRY_POINT, + To: &AA_NONCE_MANAGER, + Value: big.NewInt(0), + GasLimit: 100000, + GasPrice: tx.GasFeeCap, + GasFeeCap: tx.GasFeeCap, + GasTipCap: tx.GasTipCap, + Data: nonceManagerData, + AccessList: make(types.AccessList, 0), + SkipAccountChecks: true, + IsRip7560Frame: true, + } +} diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 0381da0c37a6..b77155b181b5 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -30,6 +30,7 @@ type ValidationPhaseResult struct { PaymasterContext []byte PreCharge *uint256.Int EffectiveGasPrice *uint256.Int + NonceManagerUsedGas uint64 DeploymentUsedGas uint64 ValidationUsedGas uint64 PmValidationUsedGas uint64 @@ -136,10 +137,14 @@ func refundPayer(vpr *ValidationPhaseResult, state vm.StateDB, gasUsed uint64) { state.AddBalance(*chargeFrom, refund, tracing.BalanceIncreaseGasReturn) } -// precheck nonce of transaction. +// CheckNonceRip7560 pre-checks nonce of RIP-7560 transaction that don't rely on RIP-7712 two-dimensional nonces. // (standard preCheck function check both nonce and no-code of account) +// Make sure this transaction's nonce is correct. func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) error { - // Make sure this transaction's nonce is correct. 
+ // RIP-7712 two-dimensional nonce is checked on-chain + if tx.IsRip7712Nonce() { + return nil + } stNonce := st.GetNonce(*tx.Sender) if msgNonce := tx.Nonce; stNonce < msgNonce { return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, @@ -197,6 +202,24 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, common.Address{}) } + /*** Nonce Manager Frame ***/ + var nonceManagerUsedGas uint64 + if aatx.IsRip7712Nonce() { + if chainConfig.IsRIP7712(header.Number) { + nonceManagerMessage := prepareNonceManagerMessage(tx) + resultNonceManager, err := ApplyMessage(evm, nonceManagerMessage, gp) + if err != nil { + return nil, fmt.Errorf("RIP-7712 nonce validation failed: %w", err) + } + if resultNonceManager.Failed() { + return nil, fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err) + } + nonceManagerUsedGas = resultNonceManager.UsedGas + } else { + return nil, fmt.Errorf("RIP-7712 nonce is disabled") + } + } + /*** Deployer Frame ***/ deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 @@ -225,7 +248,9 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte if statedb.GetCodeSize(*sender) == 0 { return nil, fmt.Errorf("account is not deployed and no factory is specified, account:%s", sender.String()) } - statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) + if !aatx.IsRip7712Nonce() { + statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) + } } /*** Account Validation Frame ***/ @@ -275,6 +300,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte vpr.EffectiveGasPrice = gasPriceUint256 vpr.PaymasterContext = paymasterContext vpr.DeploymentUsedGas = deploymentUsedGas + vpr.NonceManagerUsedGas = nonceManagerUsedGas vpr.ValidationUsedGas = resultAccountValidation.UsedGas vpr.PmValidationUsedGas = pmValidationUsedGas vpr.SenderValidAfter = 
aad.ValidAfter.Uint64() @@ -361,6 +387,7 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase gasUsed := vpr.ValidationUsedGas + + vpr.NonceManagerUsedGas + vpr.DeploymentUsedGas + vpr.PmValidationUsedGas + executionResult.UsedGas diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 39b6960664ca..839fd8a91ece 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -28,6 +28,7 @@ import ( type Rip7560AccountAbstractionTx struct { // overlapping fields ChainID *big.Int + Nonce uint64 GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas GasFeeCap *big.Int // a.k.a. maxFeePerGas Gas uint64 @@ -46,19 +47,22 @@ type Rip7560AccountAbstractionTx struct { PaymasterValidationGasLimit uint64 PostOpGas uint64 + // RIP-7712 two-dimensional nonce (optional), 192 bits + NonceKey *big.Int + // removed fields To *common.Address `rlp:"nil"` - Nonce uint64 Value *big.Int } // copy creates a deep copy of the transaction data and initializes all fields. func (tx *Rip7560AccountAbstractionTx) copy() TxData { cpy := &Rip7560AccountAbstractionTx{ - To: copyAddressPtr(tx.To), - Data: common.CopyBytes(tx.Data), - Nonce: tx.Nonce, - Gas: tx.Gas, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Nonce: tx.Nonce, + NonceKey: new(big.Int), + Gas: tx.Gas, // These are copied below. 
AccessList: make(AccessList, len(tx.AccessList)), Value: new(big.Int), @@ -93,6 +97,9 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { if tx.BuilderFee != nil { cpy.BuilderFee.Set(tx.BuilderFee) } + if tx.NonceKey != nil { + cpy.NonceKey.Set(tx.NonceKey) + } return cpy } @@ -109,6 +116,11 @@ func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return tx.Value func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return tx.Nonce } func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return tx.To } +// IsRip7712Nonce returns true if the transaction uses an RIP-7712 two-dimensional nonce +func (tx *Rip7560AccountAbstractionTx) IsRip7712Nonce() bool { + return tx.NonceKey != nil && tx.NonceKey.Cmp(big.NewInt(0)) == 1 +} + func (tx *Rip7560AccountAbstractionTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil { return dst.Set(tx.GasFeeCap) @@ -152,6 +164,7 @@ func (tx *Rip7560AccountAbstractionTx) decode(input []byte) error { // Rip7560Transaction an equivalent of a solidity struct only used to encode the 'transaction' parameter type Rip7560Transaction struct { Sender common.Address + NonceKey *big.Int Nonce *big.Int ValidationGasLimit *big.Int PaymasterValidationGasLimit *big.Int @@ -171,6 +184,7 @@ type Rip7560Transaction struct { func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { structThing, _ := abi.NewType("tuple", "struct thing", []abi.ArgumentMarshaling{ {Name: "sender", Type: "address"}, + {Name: "nonceKey", Type: "uint256"}, {Name: "nonce", Type: "uint256"}, {Name: "validationGasLimit", Type: "uint256"}, {Name: "paymasterValidationGasLimit", Type: "uint256"}, @@ -202,6 +216,7 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { record := &Rip7560Transaction{ Sender: *tx.Sender, + NonceKey: tx.NonceKey, Nonce: big.NewInt(int64(tx.Nonce)), ValidationGasLimit: big.NewInt(int64(tx.ValidationGasLimit)), PaymasterValidationGasLimit: 
big.NewInt(int64(tx.PaymasterValidationGasLimit)), diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b6181dec8f8a..5e685aa8aafd 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1361,6 +1361,9 @@ type RPCTransaction struct { ValidationGas *hexutil.Uint64 `json:"verificationGasLimit,omitempty"` PaymasterValidationGasLimit *hexutil.Uint64 `json:"paymasterVerificationGasLimit,omitempty"` PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit,omitempty"` + + // Introduced by RIP-7712 + NonceKey *hexutil.Big `json:"nonceKey,omitempty"` } func toBytes(data []byte) *hexutil.Bytes { @@ -1439,6 +1442,7 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.S = nil result.R = nil result.V = nil + result.NonceKey = (*hexutil.Big)(rip7560Tx.NonceKey) result.Input = rip7560Tx.Data result.Sender = rip7560Tx.Sender result.Signature = toBytes(rip7560Tx.Signature) diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index bdff65a038a1..eee4dc4e4242 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -86,6 +86,9 @@ type TransactionArgs struct { ValidationGas *hexutil.Uint64 `json:"verificationGasLimit"` PaymasterGas *hexutil.Uint64 `json:"paymasterVerificationGasLimit"` PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit"` + + // Introduced by RIP-7712 Transaction + NonceKey *hexutil.Big `json:"nonceKey,omitempty"` } // from retrieves the transaction sender address. 
@@ -522,6 +525,7 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { To: &common.Address{}, ChainID: (*big.Int)(args.ChainID), Gas: uint64(*args.Gas), + NonceKey: (*big.Int)(args.NonceKey), Nonce: uint64(*args.Nonce), GasFeeCap: (*big.Int)(args.MaxFeePerGas), GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), diff --git a/params/config.go b/params/config.go index 0c783329504f..abc674ede54b 100644 --- a/params/config.go +++ b/params/config.go @@ -174,6 +174,7 @@ var ( EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), RIP7560Block: big.NewInt(0), + RIP7712Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), @@ -338,6 +339,7 @@ type ChainConfig struct { EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block RIP7560Block *big.Int `json:"rip7560block,omitempty"` // RIP7560 HF block + RIP7712Block *big.Int `json:"rip7712block,omitempty"` // RIP7712 HF block ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium) ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated) @@ -594,6 +596,11 @@ func (c *ChainConfig) IsRIP7560(num *big.Int) bool { return isBlockForked(c.RIP7560Block, num) } +// IsRIP7712 returns whether RIP7712 has been activated at given block. +func (c *ChainConfig) IsRIP7712(num *big.Int) bool { + return isBlockForked(c.RIP7712Block, num) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { From 889c0b84a1b1a42f9f10db5ecc4bbbcc7ee27255 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 20 Aug 2024 12:33:17 +0200 Subject: [PATCH 56/73] AA-419: "signature" -> "authorizationData" & "calldata" -> "executionData" (#30) * AA-419: "signature" -> "authorizationData", "calldata" -> "executionData" --- core/state_processor_rip7560.go | 2 +- core/types/transaction_signing_rip7560.go | 1 - core/types/tx_rip7560.go | 52 +++++++++-------------- internal/ethapi/api.go | 9 ++-- internal/ethapi/transaction_args.go | 52 ++++++++++++++--------- tests/rip7560/process_test.go | 2 +- 6 files changed, 60 insertions(+), 58 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index b77155b181b5..43afaa7dfdc1 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -490,7 +490,7 @@ func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.Ch GasPrice: tx.GasFeeCap, GasFeeCap: tx.GasFeeCap, GasTipCap: tx.GasTipCap, - Data: tx.Data, + Data: tx.ExecutionData, AccessList: nil, SkipAccountChecks: true, IsRip7560Frame: true, diff --git a/core/types/transaction_signing_rip7560.go b/core/types/transaction_signing_rip7560.go index 79ef14efaebc..99a5721619f1 100644 --- a/core/types/transaction_signing_rip7560.go +++ b/core/types/transaction_signing_rip7560.go @@ -32,7 +32,6 @@ func (s rip7560Signer) Hash(tx *Transaction) common.Hash { tx.GasTipCap(), tx.GasFeeCap(), tx.Gas(), - //tx.To(), tx.Data(), tx.AccessList(), diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 839fd8a91ece..772afa909fff 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -32,12 +32,12 @@ type Rip7560AccountAbstractionTx struct { GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas GasFeeCap *big.Int // a.k.a. 
maxFeePerGas Gas uint64 - Data []byte AccessList AccessList // extra fields Sender *common.Address - Signature []byte + AuthorizationData []byte + ExecutionData []byte Paymaster *common.Address `rlp:"nil"` PaymasterData []byte Deployer *common.Address `rlp:"nil"` @@ -49,29 +49,25 @@ type Rip7560AccountAbstractionTx struct { // RIP-7712 two-dimensional nonce (optional), 192 bits NonceKey *big.Int - - // removed fields - To *common.Address `rlp:"nil"` - Value *big.Int } // copy creates a deep copy of the transaction data and initializes all fields. func (tx *Rip7560AccountAbstractionTx) copy() TxData { cpy := &Rip7560AccountAbstractionTx{ - To: copyAddressPtr(tx.To), - Data: common.CopyBytes(tx.Data), - Nonce: tx.Nonce, - NonceKey: new(big.Int), - Gas: tx.Gas, + //To: copyAddressPtr(tx.To), + ExecutionData: common.CopyBytes(tx.ExecutionData), + Nonce: tx.Nonce, + NonceKey: new(big.Int), + Gas: tx.Gas, // These are copied below. AccessList: make(AccessList, len(tx.AccessList)), - Value: new(big.Int), - ChainID: new(big.Int), - GasTipCap: new(big.Int), - GasFeeCap: new(big.Int), + //Value: new(big.Int), + ChainID: new(big.Int), + GasTipCap: new(big.Int), + GasFeeCap: new(big.Int), Sender: copyAddressPtr(tx.Sender), - Signature: common.CopyBytes(tx.Signature), + AuthorizationData: common.CopyBytes(tx.AuthorizationData), Paymaster: copyAddressPtr(tx.Paymaster), PaymasterData: common.CopyBytes(tx.PaymasterData), Deployer: copyAddressPtr(tx.Deployer), @@ -82,9 +78,6 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { PostOpGas: tx.PostOpGas, } copy(cpy.AccessList, tx.AccessList) - if tx.Value != nil { - cpy.Value.Set(tx.Value) - } if tx.ChainID != nil { cpy.ChainID.Set(tx.ChainID) } @@ -107,14 +100,14 @@ func (tx *Rip7560AccountAbstractionTx) copy() TxData { func (tx *Rip7560AccountAbstractionTx) txType() byte { return Rip7560Type } func (tx *Rip7560AccountAbstractionTx) chainID() *big.Int { return tx.ChainID } func (tx *Rip7560AccountAbstractionTx) accessList() 
AccessList { return tx.AccessList } -func (tx *Rip7560AccountAbstractionTx) data() []byte { return tx.Data } +func (tx *Rip7560AccountAbstractionTx) data() []byte { return make([]byte, 0) } func (tx *Rip7560AccountAbstractionTx) gas() uint64 { return tx.Gas } func (tx *Rip7560AccountAbstractionTx) gasFeeCap() *big.Int { return tx.GasFeeCap } func (tx *Rip7560AccountAbstractionTx) gasTipCap() *big.Int { return tx.GasTipCap } func (tx *Rip7560AccountAbstractionTx) gasPrice() *big.Int { return tx.GasFeeCap } -func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return tx.Value } +func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return big.NewInt(0) } func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return tx.Nonce } -func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return tx.To } +func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return nil } // IsRip7712Nonce returns true if the transaction uses an RIP-7712 two-dimensional nonce func (tx *Rip7560AccountAbstractionTx) IsRip7712Nonce() bool { @@ -150,9 +143,6 @@ func (t *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { if tx.Deployer != nil && zeroAddress.Cmp(*tx.Deployer) == 0 { tx.Deployer = nil } - if tx.To != nil && zeroAddress.Cmp(*tx.To) == 0 { - tx.To = nil - } return rlp.Encode(b, tx) } @@ -177,8 +167,8 @@ type Rip7560Transaction struct { PaymasterData []byte Deployer common.Address DeployerData []byte - CallData []byte - Signature []byte + ExecutionData []byte + AuthorizationData []byte } func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { @@ -197,8 +187,8 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { {Name: "paymasterData", Type: "bytes"}, {Name: "deployer", Type: "address"}, {Name: "deployerData", Type: "bytes"}, - {Name: "callData", Type: "bytes"}, - {Name: "signature", Type: "bytes"}, + {Name: "executionData", Type: "bytes"}, + {Name: "authorizationData", Type: "bytes"}, }) args := abi.Arguments{ @@ -229,8 
+219,8 @@ func (tx *Rip7560AccountAbstractionTx) AbiEncode() ([]byte, error) { PaymasterData: tx.PaymasterData, Deployer: *deployer, DeployerData: tx.DeployerData, - CallData: tx.Data, - Signature: tx.Signature, + ExecutionData: tx.ExecutionData, + AuthorizationData: tx.AuthorizationData, } packed, err := args.Pack(&record) return packed, err diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 5e685aa8aafd..0e3c326a7a4d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1352,7 +1352,8 @@ type RPCTransaction struct { // Introduced by RIP-7560 Transaction Sender *common.Address `json:"sender,omitempty"` - Signature *hexutil.Bytes `json:"signature,omitempty"` + AuthorizationData *hexutil.Bytes `json:"authorizationData,omitempty"` + ExecutionData *hexutil.Bytes `json:"executionData,omitempty"` Paymaster *common.Address `json:"paymaster,omitempty"` PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` Deployer *common.Address `json:"deployer,omitempty"` @@ -1442,10 +1443,12 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.S = nil result.R = nil result.V = nil + result.To = nil result.NonceKey = (*hexutil.Big)(rip7560Tx.NonceKey) - result.Input = rip7560Tx.Data + result.Input = make(hexutil.Bytes, 0) result.Sender = rip7560Tx.Sender - result.Signature = toBytes(rip7560Tx.Signature) + result.AuthorizationData = toBytes(rip7560Tx.AuthorizationData) + result.ExecutionData = toBytes(rip7560Tx.ExecutionData) result.Gas = hexutil.Uint64(tx.Gas()) result.Paymaster = rip7560Tx.Paymaster result.PaymasterData = toBytes(rip7560Tx.PaymasterData) diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index eee4dc4e4242..8ea5ebe6340a 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -76,16 +76,17 @@ type TransactionArgs struct { blobSidecarAllowed bool // Introduced by RIP-7560 Transaction - Sender *common.Address 
`json:"sender"` - Signature *hexutil.Bytes - Paymaster *common.Address `json:"paymaster,omitempty"` - PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` - Deployer *common.Address `json:"deployer,omitempty"` - DeployerData *hexutil.Bytes `json:"deployerData,omitempty"` - BuilderFee *hexutil.Big - ValidationGas *hexutil.Uint64 `json:"verificationGasLimit"` - PaymasterGas *hexutil.Uint64 `json:"paymasterVerificationGasLimit"` - PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit"` + Sender *common.Address `json:"sender"` + AuthorizationData *hexutil.Bytes `json:"authorizationData,omitempty"` + ExecutionData *hexutil.Bytes `json:"executionData,omitempty"` + Paymaster *common.Address `json:"paymaster,omitempty"` + PaymasterData *hexutil.Bytes `json:"paymasterData,omitempty"` + Deployer *common.Address `json:"deployer,omitempty"` + DeployerData *hexutil.Bytes `json:"deployerData,omitempty"` + BuilderFee *hexutil.Big `json:"builderFee,omitempty"` + ValidationGas *hexutil.Uint64 `json:"verificationGasLimit"` + PaymasterGas *hexutil.Uint64 `json:"paymasterVerificationGasLimit"` + PostOpGas *hexutil.Uint64 `json:"paymasterPostOpGasLimit"` // Introduced by RIP-7712 Transaction NonceKey *hexutil.Big `json:"nonceKey,omitempty"` @@ -522,19 +523,19 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { al = *args.AccessList } aatx := types.Rip7560AccountAbstractionTx{ - To: &common.Address{}, - ChainID: (*big.Int)(args.ChainID), - Gas: uint64(*args.Gas), - NonceKey: (*big.Int)(args.NonceKey), - Nonce: uint64(*args.Nonce), - GasFeeCap: (*big.Int)(args.MaxFeePerGas), - GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), - Value: (*big.Int)(args.Value), - Data: args.data(), - AccessList: al, + //To: &common.Address{}, + ChainID: (*big.Int)(args.ChainID), + Gas: uint64(*args.Gas), + NonceKey: (*big.Int)(args.NonceKey), + Nonce: uint64(*args.Nonce), + GasFeeCap: (*big.Int)(args.MaxFeePerGas), + GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), + //Value: 
(*big.Int)(args.Value), + ExecutionData: *args.ExecutionData, + AccessList: al, // RIP-7560 parameters Sender: args.Sender, - Signature: *args.Signature, + AuthorizationData: *args.AuthorizationData, Paymaster: args.Paymaster, PaymasterData: *args.PaymasterData, Deployer: args.Deployer, @@ -544,6 +545,15 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction { PaymasterValidationGasLimit: toUint64(args.PaymasterGas), PostOpGas: toUint64(args.PostOpGas), } + + zeroAddress := common.Address{} + if aatx.Paymaster != nil && zeroAddress.Cmp(*aatx.Paymaster) == 0 { + aatx.Paymaster = nil + } + if aatx.Deployer != nil && zeroAddress.Cmp(*aatx.Deployer) == 0 { + aatx.Deployer = nil + } + data = &aatx hash := types.NewTx(data).Hash() log.Error("RIP-7560 transaction created", "sender", aatx.Sender.Hex(), "hash", hash) diff --git a/tests/rip7560/process_test.go b/tests/rip7560/process_test.go index 9ddb0fca08e5..b0aba5bbaf54 100644 --- a/tests/rip7560/process_test.go +++ b/tests/rip7560/process_test.go @@ -45,7 +45,7 @@ func TestProcess1(t *testing.T) { Sender: &Sender, ValidationGasLimit: uint64(1000000000), GasFeeCap: big.NewInt(1000000000), - Data: []byte{1, 2, 3}, + ExecutionData: []byte{1, 2, 3}, }, }) assert.NoError(t, err) From bf81276921f606d62c3b355b7d90393c077627a1 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 20 Aug 2024 18:25:14 +0200 Subject: [PATCH 57/73] AA-417: Fix ignored paymaster revert (#31) * AA-419: "signature" -> "authorizationData", "calldata" -> "executionData" --- core/state_processor_rip7560.go | 176 ++++++++++++++++++++--------- eth/tracers/api_tracing_rip7560.go | 59 +++------- miner/worker.go | 6 +- 3 files changed, 144 insertions(+), 97 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 43afaa7dfdc1..4553c53d4ae4 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -3,7 +3,9 @@ package core import ( "errors" "fmt" + 
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" @@ -38,8 +40,54 @@ type ValidationPhaseResult struct { SenderValidUntil uint64 PmValidAfter uint64 PmValidUntil uint64 - RevertReason []byte - RevertEntityName string +} + +// ValidationPhaseError is an API error that encompasses an EVM revert with JSON error +// code and a binary data blob. +type ValidationPhaseError struct { + error + reason string // revert reason hex encoded +} + +func (v *ValidationPhaseError) Error() string { + return v.error.Error() +} + +func (v *ValidationPhaseError) ErrorData() interface{} { + return v.reason +} + +// newValidationPhaseError creates a revertError instance with the provided revert data. +func newValidationPhaseError( + innerErr error, + revertReason []byte, + revertEntityName *string, +) *ValidationPhaseError { + var errorMessage string + contractSubst := "" + if revertEntityName != nil { + contractSubst = fmt.Sprintf(" in contract %s", *revertEntityName) + } + if innerErr != nil { + errorMessage = fmt.Sprintf( + "validation phase failed%s with exception: %s", + contractSubst, + innerErr.Error(), + ) + } else { + errorMessage = fmt.Sprintf("validation phase failed%s", contractSubst) + } + // TODO: use "vm.ErrorX" for RIP-7560 specific errors as well! 
+ err := errors.New(errorMessage) + + reason, errUnpack := abi.UnpackRevert(revertReason) + if errUnpack == nil { + err = fmt.Errorf("%w: %v", err, reason) + } + return &ValidationPhaseError{ + error: err, + reason: hexutil.Encode(revertReason), + } } // HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions and return @@ -72,9 +120,9 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta statedb.SetTxContext(tx.Hash(), index+i) - vpr, err := ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) - if err != nil { - return nil, nil, nil, err + vpr, vpe := ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) + if vpe != nil { + return nil, nil, nil, vpe } validationPhaseResults = append(validationPhaseResults, vpr) validatedTransactions = append(validatedTransactions, tx) @@ -159,11 +207,22 @@ func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) return nil } -func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) (*ValidationPhaseResult, error) { +func ptr(s string) *string { return &s } + +func ApplyRip7560ValidationPhases( + chainConfig *params.ChainConfig, + bc ChainContext, + author *common.Address, + gp *GasPool, + statedb *state.StateDB, + header *types.Header, + tx *types.Transaction, + cfg vm.Config, +) (*ValidationPhaseResult, error) { aatx := tx.Rip7560TransactionData() err := CheckNonceRip7560(aatx, statedb) if err != nil { - return nil, err + return nil, newValidationPhaseError(err, nil, nil) } gasPrice := new(big.Int).Add(header.BaseFee, tx.GasTipCap()) @@ -174,7 +233,7 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) if err != nil { - return nil, 
err + return nil, newValidationPhaseError(err, nil, nil) } blockContext := NewEVMBlockContext(header, bc, author) @@ -205,19 +264,22 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte /*** Nonce Manager Frame ***/ var nonceManagerUsedGas uint64 if aatx.IsRip7712Nonce() { - if chainConfig.IsRIP7712(header.Number) { - nonceManagerMessage := prepareNonceManagerMessage(tx) - resultNonceManager, err := ApplyMessage(evm, nonceManagerMessage, gp) - if err != nil { - return nil, fmt.Errorf("RIP-7712 nonce validation failed: %w", err) - } - if resultNonceManager.Failed() { - return nil, fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err) - } - nonceManagerUsedGas = resultNonceManager.UsedGas - } else { - return nil, fmt.Errorf("RIP-7712 nonce is disabled") + if !chainConfig.IsRIP7712(header.Number) { + return nil, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil) + } + nonceManagerMessage := prepareNonceManagerMessage(tx) + resultNonceManager, err := ApplyMessage(evm, nonceManagerMessage, gp) + if err != nil { + return nil, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce validation failed: %w", err), nil, nil) } + if resultNonceManager.Failed() { + return nil, newValidationPhaseError( + fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err), + resultNonceManager.ReturnData, + ptr("NonceManager"), + ) + } + nonceManagerUsedGas = resultNonceManager.UsedGas } /*** Deployer Frame ***/ @@ -232,21 +294,29 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte resultDeployer, err = ApplyMessage(evm, deployerMsg, gp) } if err != nil { - return nil, fmt.Errorf("account deployment failed: %v", err) + return nil, newValidationPhaseError(fmt.Errorf("account deployment failed: %v", err), nil, nil) } if resultDeployer.Failed() { - return &ValidationPhaseResult{ - RevertEntityName: "deployer", - RevertReason: resultDeployer.ReturnData, - }, nil + return 
nil, newValidationPhaseError( + resultDeployer.Err, + resultDeployer.ReturnData, + ptr("deployer"), + ) } if statedb.GetCodeSize(*sender) == 0 { - return nil, fmt.Errorf("account was not deployed by a factory, account:%s factory%s", sender.String(), deployerMsg.To.String()) + return nil, newValidationPhaseError( + fmt.Errorf( + "account was not deployed by a factory, account:%s factory%s", + sender.String(), deployerMsg.To.String(), + ), nil, nil) } deploymentUsedGas = resultDeployer.UsedGas } else { if statedb.GetCodeSize(*sender) == 0 { - return nil, fmt.Errorf("account is not deployed and no factory is specified, account:%s", sender.String()) + return nil, newValidationPhaseError( + fmt.Errorf( + "account is not deployed and no factory is specified, account:%s", sender.String(), + ), nil, nil) } if !aatx.IsRip7712Nonce() { statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) @@ -259,17 +329,18 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte accountValidationMsg, err := prepareAccountValidationMessage(tx, chainConfig, signingHash, deploymentUsedGas) resultAccountValidation, err := ApplyMessage(evm, accountValidationMsg, gp) if err != nil { - return nil, err + return nil, newValidationPhaseError(err, nil, nil) } if resultAccountValidation.Failed() { - return &ValidationPhaseResult{ - RevertEntityName: "account", - RevertReason: resultAccountValidation.ReturnData, - }, nil + return nil, newValidationPhaseError( + resultAccountValidation.Err, + resultAccountValidation.ReturnData, + ptr("account"), + ) } aad, err := validateAccountEntryPointCall(epc, aatx.Sender) if err != nil { - return nil, err + return nil, newValidationPhaseError(err, nil, nil) } // clear the EntryPoint calls array after parsing @@ -279,21 +350,15 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte err = validateValidityTimeRange(header.Time, aad.ValidAfter.Uint64(), aad.ValidUntil.Uint64()) if err != nil { - return nil, err + 
return nil, newValidationPhaseError(err, nil, nil) } - vpr := &ValidationPhaseResult{} - paymasterContext, paymasterRevertReason, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(epc, tx, chainConfig, signingHash, evm, gp, statedb, header) - if err != nil { - return nil, err - } - if paymasterRevertReason != nil { - return &ValidationPhaseResult{ - RevertEntityName: "paymaster", - RevertReason: paymasterRevertReason, - }, nil + paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, vpe := applyPaymasterValidationFrame(epc, tx, chainConfig, signingHash, evm, gp, statedb, header) + if vpe != nil { + return nil, vpe } + vpr := &ValidationPhaseResult{} vpr.Tx = tx vpr.TxHash = tx.Hash() vpr.PreCharge = preCharge @@ -312,31 +377,38 @@ func ApplyRip7560ValidationPhases(chainConfig *params.ChainConfig, bc ChainConte return vpr, nil } -func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, []byte, uint64, uint64, uint64, error) { +func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, uint64, uint64, uint64, error) { /*** Paymaster Validation Frame ***/ aatx := tx.Rip7560TransactionData() var pmValidationUsedGas uint64 paymasterMsg, err := preparePaymasterValidationMessage(tx, chainConfig, signingHash) - if paymasterMsg == nil || err != nil { - return nil, nil, 0, 0, 0, err + if err != nil { + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) + } + if paymasterMsg == nil { + return nil, 0, 0, 0, nil } resultPm, err := ApplyMessage(evm, paymasterMsg, gp) if err != nil { - return nil, nil, 0, 0, 0, err + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) } if resultPm.Failed() { - return 
nil, resultPm.ReturnData, 0, 0, 0, nil + return nil, 0, 0, 0, newValidationPhaseError( + resultPm.Err, + resultPm.ReturnData, + ptr("paymaster"), + ) } pmValidationUsedGas = resultPm.UsedGas apd, err := validatePaymasterEntryPointCall(epc, aatx.Paymaster) if err != nil { - return nil, nil, 0, 0, 0, err + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) } err = validateValidityTimeRange(header.Time, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64()) if err != nil { - return nil, nil, 0, 0, 0, err + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) } - return apd.Context, nil, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil + return apd.Context, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool) (*ExecutionResult, error) { diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go index 88da7889e631..03a5667c5e1d 100644 --- a/eth/tracers/api_tracing_rip7560.go +++ b/eth/tracers/api_tracing_rip7560.go @@ -4,9 +4,7 @@ import ( "context" "errors" "fmt" - "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -17,35 +15,6 @@ import ( "time" ) -// note: revertError code is copied here from the 'ethapi' package - -// revertError is an API error that encompasses an EVM revert with JSON error -// code and a binary data blob. -type validationRevertError struct { - error - reason string // revert reason hex encoded -} - -func (v *validationRevertError) ErrorData() interface{} { - return v.reason -} - -// newValidationRevertError creates a revertError instance with the provided revert data. 
-func newValidationRevertError(vpr *core.ValidationPhaseResult) *validationRevertError { - errorMessage := fmt.Sprintf("validation phase reverted in contract %s", vpr.RevertEntityName) - // TODO: use "vm.ErrorX" for RIP-7560 specific errors as well! - err := errors.New(errorMessage) - - reason, errUnpack := abi.UnpackRevert(vpr.RevertReason) - if errUnpack == nil { - err = fmt.Errorf("%w: %v", err, reason) - } - return &validationRevertError{ - error: err, - reason: hexutil.Encode(vpr.RevertReason), - } -} - // Rip7560API is the collection of tracing APIs exposed over the private debugging endpoint. type Rip7560API struct { backend Backend @@ -86,14 +55,11 @@ func (api *Rip7560API) TraceRip7560Validation( if config != nil { traceConfig = &config.TraceConfig } - traceResult, vpr, err := api.traceTx(ctx, tx, new(Context), block, vmctx, statedb, traceConfig) + traceResult, err := api.traceTx(ctx, tx, new(Context), block, vmctx, statedb, traceConfig) if err != nil { return nil, err } - if vpr != nil && vpr.RevertReason != nil { - return nil, newValidationRevertError(vpr) - } - return traceResult, err + return traceResult, nil } //////// copy-pasted code @@ -117,7 +83,15 @@ func (api *Rip7560API) chainContext(ctx context.Context) core.ChainContext { return ethapi.NewChainContext(ctx, api.backend) } -func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx *Context, block *types.Block, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, *core.ValidationPhaseResult, error) { +func (api *Rip7560API) traceTx( + ctx context.Context, + tx *types.Transaction, + txctx *Context, + block *types.Block, + vmctx vm.BlockContext, + statedb *state.StateDB, + config *TraceConfig, +) (interface{}, error) { var ( tracer *Tracer err error @@ -147,7 +121,7 @@ func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx // Define a meaningful timeout of a single transaction trace if config.Timeout != nil { if timeout, 
err = time.ParseDuration(*config.Timeout); err != nil { - return nil, nil, err + return nil, err } } deadlineCtx, cancel := context.WithTimeout(ctx, timeout) @@ -168,13 +142,12 @@ func (api *Rip7560API) traceTx(ctx context.Context, tx *types.Transaction, txctx // TODO: this is added to allow our bundler checking the 'TraceValidation' API is supported on Geth if tx.Rip7560TransactionData().Sender.Cmp(common.HexToAddress("0x0000000000000000000000000000000000000000")) == 0 { result, err := tracer.GetResult() - return result, nil, err + return result, err } - vpr, err := core.ApplyRip7560ValidationPhases(api.backend.ChainConfig(), api.chainContext(ctx), nil, gp, statedb, block.Header(), tx, vmenv.Config) + _, err = core.ApplyRip7560ValidationPhases(api.backend.ChainConfig(), api.chainContext(ctx), nil, gp, statedb, block.Header(), tx, vmenv.Config) if err != nil { - return nil, nil, fmt.Errorf("tracing failed: %w", err) + return nil, err } - result, err := tracer.GetResult() - return result, vpr, err + return tracer.GetResult() } diff --git a/miner/worker.go b/miner/worker.go index 2cab82899e1f..c63ad869c3ee 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -382,11 +382,13 @@ func (miner *Miner) commitRip7560TransactionsBundle(env *environment, txs *types } validatedTxs, receipts, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}) - + if err != nil { + return err + } env.txs = append(env.txs, validatedTxs...) env.receipts = append(env.receipts, receipts...) 
env.tcount += len(validatedTxs) - return err + return nil } // fillTransactions retrieves the pending transactions from the txpool and fills them From ceb084b8e518e9abe61f1dbb641f1ef92d42a313 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Tue, 20 Aug 2024 18:36:58 +0200 Subject: [PATCH 58/73] AA-422: Fix transaction hash function - add all missing fields (#32) * AA-422: Fix transaction hash function - add all missing fields --- core/types/transaction_signing_rip7560.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/core/types/transaction_signing_rip7560.go b/core/types/transaction_signing_rip7560.go index 99a5721619f1..e3fbd7d00006 100644 --- a/core/types/transaction_signing_rip7560.go +++ b/core/types/transaction_signing_rip7560.go @@ -29,17 +29,23 @@ func (s rip7560Signer) Hash(tx *Transaction) common.Hash { tx.Type(), []interface{}{ s.chainId, - tx.GasTipCap(), - tx.GasFeeCap(), - tx.Gas(), - tx.Data(), - tx.AccessList(), - + aatx.Nonce, + aatx.NonceKey, aatx.Sender, - aatx.PaymasterData, + aatx.Deployer, aatx.DeployerData, + aatx.Paymaster, + aatx.PaymasterData, + aatx.ExecutionData, aatx.BuilderFee, + tx.GasTipCap(), + tx.GasFeeCap(), aatx.ValidationGasLimit, aatx.PaymasterValidationGasLimit, + aatx.PostOpGas, + tx.Gas(), + tx.AccessList(), + + // no AuthorizationData here - this is hashing "for signing" }) } From 271943dd260a93d96fd265600d35222ce41b8768 Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Fri, 30 Aug 2024 18:20:48 +0300 Subject: [PATCH 59/73] AA-425: Directly execute call frames without "ApplyMessage" wrapper & AA-432: Penalization gas postOp observability (#33) * CallFrame * move checkNonce * refactor Nonce handling * execution phase * refactor prepare-msgs * refactor json-abi * fix errors * Make 'sumGas' accept vararg inputs * Extract 'performNonceCheckFrameRip7712' function * Update comment * Remove rudimentary modifications to the original ApplyMessage flow * Account for CallDataGas in 'postOp'; 
move code to Tx type interface --------- Co-authored-by: Alex Forshtat --- core/rip7560_abi.go | 77 ++++--- core/rip7560_abi_constants.go | 62 ++--- core/rip7712_nonce.go | 35 +-- core/state_processor_rip7560.go | 397 ++++++++++++-------------------- core/state_transition.go | 16 +- core/types/tx_rip7560.go | 60 ++++- 6 files changed, 291 insertions(+), 356 deletions(-) diff --git a/core/rip7560_abi.go b/core/rip7560_abi.go index cc4aaf02f1d1..a756b9204c59 100644 --- a/core/rip7560_abi.go +++ b/core/rip7560_abi.go @@ -2,6 +2,7 @@ package core import ( "errors" + "fmt" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -9,6 +10,8 @@ import ( "strings" ) +var Rip7560Abi, err = abi.JSON(strings.NewReader(Rip7560AbiJson)) + type AcceptAccountData struct { ValidAfter *big.Int ValidUntil *big.Int @@ -21,61 +24,73 @@ type AcceptPaymasterData struct { } func abiEncodeValidateTransaction(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { - jsonAbi, err := abi.JSON(strings.NewReader(ValidateTransactionAbi)) + + txAbiEncoding, err := tx.AbiEncode() if err != nil { return nil, err } - - txAbiEncoding, err := tx.AbiEncode() - validateTransactionData, err := jsonAbi.Pack("validateTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) + validateTransactionData, err := Rip7560Abi.Pack("validateTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) return validateTransactionData, err } func abiEncodeValidatePaymasterTransaction(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { - jsonAbi, err := abi.JSON(strings.NewReader(ValidatePaymasterTransactionAbi)) txAbiEncoding, err := tx.AbiEncode() - data, err := jsonAbi.Pack("validatePaymasterTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) + if err != nil { + return nil, err + } + data, err := 
Rip7560Abi.Pack("validatePaymasterTransaction", big.NewInt(Rip7560AbiVersion), signingHash, txAbiEncoding) return data, err } -func abiEncodePostPaymasterTransaction(context []byte) ([]byte, error) { - jsonAbi, err := abi.JSON(strings.NewReader(PostPaymasterTransactionAbi)) +func abiEncodePostPaymasterTransaction(success bool, actualGasCost uint64, context []byte) []byte { + // TODO: pass actual gas cost parameter here! + postOpData, err := Rip7560Abi.Pack("postPaymasterTransaction", success, big.NewInt(int64(actualGasCost)), context) if err != nil { - return nil, err + panic("unable to encode postPaymasterTransaction") } - // TODO: pass actual gas cost parameter here! - postOpData, err := jsonAbi.Pack("postPaymasterTransaction", true, big.NewInt(0), context) - return postOpData, err + return postOpData } -func abiDecodeAcceptAccount(input []byte) (*AcceptAccountData, error) { - jsonAbi, err := abi.JSON(strings.NewReader(AcceptAccountAbi)) +func decodeMethodParamsToInterface(output interface{}, methodName string, input []byte) error { + m, err := Rip7560Abi.MethodById(input) if err != nil { - return nil, err + return fmt.Errorf("unable to decode %s: %w", methodName, err) } - methodSelector := new(big.Int).SetBytes(input[:4]).Uint64() - if methodSelector != AcceptAccountMethodSig { - if methodSelector == SigFailAccountMethodSig { - return nil, errors.New("account signature error") - } - return nil, errors.New("account validation did call the EntryPoint but not the 'acceptAccount' callback") + if methodName != m.Name { + return fmt.Errorf("unable to decode %s: got wrong method %s", methodName, m.Name) } - acceptAccountData := &AcceptAccountData{} - err = jsonAbi.UnpackIntoInterface(acceptAccountData, "acceptAccount", input[4:]) - return acceptAccountData, err + params, err := m.Inputs.Unpack(input[4:]) + if err != nil { + return fmt.Errorf("unable to decode %s: %w", methodName, err) + } + err = m.Inputs.Copy(output, params) + if err != nil { + return 
fmt.Errorf("unable to decode %s: %v", methodName, err) + } + return nil } -func abiDecodeAcceptPaymaster(input []byte) (*AcceptPaymasterData, error) { - jsonAbi, err := abi.JSON(strings.NewReader(AcceptPaymasterAbi)) +func abiDecodeAcceptAccount(input []byte, allowSigFail bool) (*AcceptAccountData, error) { + acceptAccountData := &AcceptAccountData{} + err := decodeMethodParamsToInterface(acceptAccountData, "acceptAccount", input) + if err != nil && allowSigFail { + err = decodeMethodParamsToInterface(acceptAccountData, "sigFailAccount", input) + } if err != nil { return nil, err } - methodSelector := new(big.Int).SetBytes(input[:4]).Uint64() - if methodSelector != AcceptPaymasterMethodSig { - return nil, errors.New("paymaster validation did call the EntryPoint but not the 'acceptPaymaster' callback") - } + return acceptAccountData, nil +} + +func abiDecodeAcceptPaymaster(input []byte, allowSigFail bool) (*AcceptPaymasterData, error) { acceptPaymasterData := &AcceptPaymasterData{} - err = jsonAbi.UnpackIntoInterface(acceptPaymasterData, "acceptPaymaster", input[4:]) + err := decodeMethodParamsToInterface(acceptPaymasterData, "acceptPaymaster", input) + if err != nil && allowSigFail { + err = decodeMethodParamsToInterface(acceptPaymasterData, "sigFailPaymaster", input) + } + if err != nil { + return nil, err + } if len(acceptPaymasterData.Context) > PaymasterMaxContextSize { return nil, errors.New("paymaster return data: context too large") } diff --git a/core/rip7560_abi_constants.go b/core/rip7560_abi_constants.go index da7ff2838779..e324361baad8 100644 --- a/core/rip7560_abi_constants.go +++ b/core/rip7560_abi_constants.go @@ -1,12 +1,17 @@ package core -const AcceptAccountMethodSig = uint64(0x1256ebd1) // acceptAccount(uint256,uint256) -const AcceptPaymasterMethodSig = uint64(0x03be8439) // acceptPaymaster(uint256,uint256,bytes) -const SigFailAccountMethodSig = uint64(0x7715fac2) // sigFailAccount(uint256,uint256) +import "github.com/ethereum/go-ethereum/common" 
+ const PaymasterMaxContextSize = 65536 const Rip7560AbiVersion = 0 -const ValidateTransactionAbi = ` +var AA_ENTRY_POINT = common.HexToAddress("0x0000000000000000000000000000000000007560") +var AA_SENDER_CREATOR = common.HexToAddress("0x00000000000000000000000000000000ffff7560") + +// always pay 10% of unused execution gas +const AA_GAS_PENALTY_PCT = 10 + +const Rip7560AbiJson = ` [ { "type":"function", @@ -16,11 +21,7 @@ const ValidateTransactionAbi = ` {"name": "txHash","type": "bytes32"}, {"name": "transaction","type": "bytes"} ] - } -]` - -const ValidatePaymasterTransactionAbi = ` -[ + }, { "type":"function", "name":"validatePaymasterTransaction", @@ -29,11 +30,7 @@ const ValidatePaymasterTransactionAbi = ` {"name": "txHash","type": "bytes32"}, {"name": "transaction","type": "bytes"} ] - } -]` - -const PostPaymasterTransactionAbi = ` -[ + }, { "type":"function", "name":"postPaymasterTransaction", @@ -42,31 +39,36 @@ const PostPaymasterTransactionAbi = ` {"name": "actualGasCost","type": "uint256"}, {"name": "context","type": "bytes"} ] - } -]` - -// AcceptAccountAbi Note that this is not a true ABI of the "acceptAccount" function. -// This ABI swaps inputs and outputs to simplify the ABI decoding. -const AcceptAccountAbi = ` -[ + }, { "type":"function", "name":"acceptAccount", - "outputs": [ + "inputs": [ {"name": "validAfter","type": "uint256"}, {"name": "validUntil","type": "uint256"} ] - } -]` - -// AcceptPaymasterAbi Note that this is not a true ABI of the "acceptPaymaster" function. -// This ABI swaps inputs and outputs to simplify the ABI decoding. 
-const AcceptPaymasterAbi = ` -[ + }, { "type":"function", "name":"acceptPaymaster", - "outputs": [ + "inputs": [ + {"name": "validAfter","type": "uint256"}, + {"name": "validUntil","type": "uint256"}, + {"name": "context","type": "bytes"} + ] + }, + { + "type":"function", + "name":"sigFailAccount", + "inputs": [ + {"name": "validAfter","type": "uint256"}, + {"name": "validUntil","type": "uint256"} + ] + }, + { + "type":"function", + "name":"sigFailPaymaster", + "inputs": [ {"name": "validAfter","type": "uint256"}, {"name": "validUntil","type": "uint256"}, {"name": "context","type": "bytes"} diff --git a/core/rip7712_nonce.go b/core/rip7712_nonce.go index e77388ed2325..1cdb9c33fdfa 100644 --- a/core/rip7712_nonce.go +++ b/core/rip7712_nonce.go @@ -2,39 +2,20 @@ package core import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" "math/big" + "slices" ) // TODO: accept address as configuration parameter var AA_NONCE_MANAGER = common.HexToAddress("0x63f63e798f5F6A934Acf0a3FD1C01f3Fac851fF0") -func prepareNonceManagerMessage(baseTx *types.Transaction) *Message { - // TODO: this can probably be done a lot easier, check syntax - tx := baseTx.Rip7560TransactionData() - nonceKey := make([]byte, 24) - nonce := make([]byte, 8) - nonceKey256, _ := uint256.FromBig(tx.NonceKey) - nonce256 := uint256.NewInt(tx.Nonce) - nonceKey256.WriteToSlice(nonceKey) - nonce256.WriteToSlice(nonce) +func prepareNonceManagerMessage(tx *types.Rip7560AccountAbstractionTx) []byte { - nonceManagerData := make([]byte, 0) - nonceManagerData = append(nonceManagerData[:], tx.Sender.Bytes()...) - nonceManagerData = append(nonceManagerData[:], nonceKey...) - nonceManagerData = append(nonceManagerData[:], nonce...) 
- return &Message{ - From: AA_ENTRY_POINT, - To: &AA_NONCE_MANAGER, - Value: big.NewInt(0), - GasLimit: 100000, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: nonceManagerData, - AccessList: make(types.AccessList, 0), - SkipAccountChecks: true, - IsRip7560Frame: true, - } + return slices.Concat( + tx.Sender.Bytes(), + math.PaddedBigBytes(tx.NonceKey, 24), + math.PaddedBigBytes(big.NewInt(int64(tx.Nonce)), 8), + ) } diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 4553c53d4ae4..96ff30296eb4 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" @@ -15,9 +16,6 @@ import ( "math/big" ) -var AA_ENTRY_POINT = common.HexToAddress("0x0000000000000000000000000000000000007560") -var AA_SENDER_CREATOR = common.HexToAddress("0x00000000000000000000000000000000ffff7560") - type EntryPointCall struct { OnEnterSuper tracing.EnterHook Input []byte @@ -32,6 +30,7 @@ type ValidationPhaseResult struct { PaymasterContext []byte PreCharge *uint256.Int EffectiveGasPrice *uint256.Int + CallDataUsedGas uint64 NonceManagerUsedGas uint64 DeploymentUsedGas uint64 ValidationUsedGas uint64 @@ -147,36 +146,30 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta return validatedTransactions, receipts, allLogs, nil } -// todo: move to a suitable interface, whatever that is -// todo 2: maybe handle the "shared gas pool" situation instead of just overriding it completely? 
-func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB, gasPrice *uint256.Int) (*uint256.Int, error) { - gasLimit := st.Gas + st.ValidationGasLimit + st.PaymasterValidationGasLimit + st.PostOpGas +func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB, gasPrice *uint256.Int) (uint64, *uint256.Int, error) { + gasLimit, err := st.TotalGasLimit() + if err != nil { + return 0, nil, err + } + + //TODO: check gasLimit against block gasPool preCharge := new(uint256.Int).SetUint64(gasLimit) preCharge = preCharge.Mul(preCharge, gasPrice) balanceCheck := new(uint256.Int).Set(preCharge) - chargeFrom := st.Sender - - if st.Paymaster != nil && st.Paymaster.Cmp(common.Address{}) != 0 { - chargeFrom = st.Paymaster - } + chargeFrom := st.GasPayer() if have, want := state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { - return nil, fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) + return 0, nil, fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } state.SubBalance(*chargeFrom, preCharge, 0) - return preCharge, nil + return gasLimit, preCharge, nil } // refund the transaction payer (either account or paymaster) with the excess gas cost func refundPayer(vpr *ValidationPhaseResult, state vm.StateDB, gasUsed uint64) { - var chargeFrom *common.Address - if vpr.PmValidationUsedGas == 0 { - chargeFrom = vpr.Tx.Rip7560TransactionData().Sender - } else { - chargeFrom = vpr.Tx.Rip7560TransactionData().Paymaster - } + var chargeFrom = vpr.Tx.Rip7560TransactionData().GasPayer() actualGasCost := new(uint256.Int).Mul(vpr.EffectiveGasPrice, new(uint256.Int).SetUint64(gasUsed)) @@ -185,26 +178,55 @@ func refundPayer(vpr *ValidationPhaseResult, state vm.StateDB, gasUsed uint64) { state.AddBalance(*chargeFrom, refund, tracing.BalanceIncreaseGasReturn) } -// CheckNonceRip7560 pre-checks nonce of RIP-7560 transaction that don't rely 
on RIP-7712 two-dimensional nonces. -// (standard preCheck function check both nonce and no-code of account) -// Make sure this transaction's nonce is correct. -func CheckNonceRip7560(tx *types.Rip7560AccountAbstractionTx, st *state.StateDB) error { - // RIP-7712 two-dimensional nonce is checked on-chain +// CheckNonceRip7560 checks nonce of RIP-7560 transactions. +// Transactions that don't rely on RIP-7712 two-dimensional nonces are checked statically. +// Transactions using RIP-7712 two-dimensional nonces execute an extra validation frame on-chain. +func CheckNonceRip7560(st *StateTransition, tx *types.Rip7560AccountAbstractionTx) (uint64, error) { if tx.IsRip7712Nonce() { - return nil + return performNonceCheckFrameRip7712(st, tx) } - stNonce := st.GetNonce(*tx.Sender) + stNonce := st.state.GetNonce(*tx.Sender) if msgNonce := tx.Nonce; stNonce < msgNonce { - return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, + return 0, fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, tx.Sender.Hex(), msgNonce, stNonce) } else if stNonce > msgNonce { - return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, + return 0, fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, tx.Sender.Hex(), msgNonce, stNonce) } else if stNonce+1 < stNonce { - return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, + return 0, fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, tx.Sender.Hex(), stNonce) } - return nil + return 0, nil +} + +func performNonceCheckFrameRip7712(st *StateTransition, tx *types.Rip7560AccountAbstractionTx) (uint64, error) { + if !st.evm.ChainConfig().IsRIP7712(st.evm.Context.BlockNumber) { + return 0, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil) + } + nonceManagerMessageData := prepareNonceManagerMessage(tx) + resultNonceManager := CallFrame(st, &AA_ENTRY_POINT, &AA_NONCE_MANAGER, nonceManagerMessageData, st.gasRemaining) + if resultNonceManager.Failed() { + return 0, 
newValidationPhaseError( + fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err), + resultNonceManager.ReturnData, + ptr("NonceManager"), + ) + } + return resultNonceManager.UsedGas, nil +} + +// call a frame in the context of this state transition. +func CallFrame(st *StateTransition, from *common.Address, to *common.Address, data []byte, gasLimit uint64) *ExecutionResult { + sender := vm.AccountRef(*from) + retData, gasRemaining, err := st.evm.Call(sender, *to, data, gasLimit, uint256.NewInt(0)) + usedGas := gasLimit - gasRemaining + st.gasRemaining -= usedGas + + return &ExecutionResult{ + ReturnData: retData, + UsedGas: usedGas, + Err: err, + } } func ptr(s string) *string { return &s } @@ -220,10 +242,6 @@ func ApplyRip7560ValidationPhases( cfg vm.Config, ) (*ValidationPhaseResult, error) { aatx := tx.Rip7560TransactionData() - err := CheckNonceRip7560(aatx, statedb) - if err != nil { - return nil, newValidationPhaseError(err, nil, nil) - } gasPrice := new(big.Int).Add(header.BaseFee, tx.GasTipCap()) if gasPrice.Cmp(tx.GasFeeCap()) > 0 { @@ -231,7 +249,7 @@ func ApplyRip7560ValidationPhases( } gasPriceUint256, _ := uint256.FromBig(gasPrice) - preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) + gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) if err != nil { return nil, newValidationPhaseError(err, nil, nil) } @@ -261,41 +279,23 @@ func ApplyRip7560ValidationPhases( evm.Config.Tracer.OnTxStart(evm.GetVMContext(), tx, common.Address{}) } + st := NewStateTransition(evm, nil, gp) + st.initialGas = gasLimit + st.gasRemaining = gasLimit + /*** Nonce Manager Frame ***/ - var nonceManagerUsedGas uint64 - if aatx.IsRip7712Nonce() { - if !chainConfig.IsRIP7712(header.Number) { - return nil, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil) - } - nonceManagerMessage := prepareNonceManagerMessage(tx) - resultNonceManager, err := ApplyMessage(evm, 
nonceManagerMessage, gp) - if err != nil { - return nil, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce validation failed: %w", err), nil, nil) - } - if resultNonceManager.Failed() { - return nil, newValidationPhaseError( - fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err), - resultNonceManager.ReturnData, - ptr("NonceManager"), - ) - } - nonceManagerUsedGas = resultNonceManager.UsedGas + nonceManagerUsedGas, err := CheckNonceRip7560(st, aatx) + if err != nil { + return nil, err } /*** Deployer Frame ***/ - deployerMsg := prepareDeployerMessage(tx, chainConfig) var deploymentUsedGas uint64 - if deployerMsg != nil { - var err error - var resultDeployer *ExecutionResult + if aatx.Deployer != nil { if statedb.GetCodeSize(*sender) != 0 { - err = errors.New("sender already deployed") - } else { - resultDeployer, err = ApplyMessage(evm, deployerMsg, gp) - } - if err != nil { - return nil, newValidationPhaseError(fmt.Errorf("account deployment failed: %v", err), nil, nil) + return nil, fmt.Errorf("account deployment failed: already deployed") } + resultDeployer := CallFrame(st, &AA_SENDER_CREATOR, aatx.Deployer, aatx.DeployerData, aatx.ValidationGasLimit) if resultDeployer.Failed() { return nil, newValidationPhaseError( resultDeployer.Err, @@ -306,8 +306,8 @@ func ApplyRip7560ValidationPhases( if statedb.GetCodeSize(*sender) == 0 { return nil, newValidationPhaseError( fmt.Errorf( - "account was not deployed by a factory, account:%s factory%s", - sender.String(), deployerMsg.To.String(), + "sender not deployed by factory, sender:%s factory:%s", + sender.String(), aatx.Deployer.String(), ), nil, nil) } deploymentUsedGas = resultDeployer.UsedGas @@ -326,11 +326,11 @@ func ApplyRip7560ValidationPhases( /*** Account Validation Frame ***/ signer := types.MakeSigner(chainConfig, header.Number, header.Time) signingHash := signer.Hash(tx) - accountValidationMsg, err := prepareAccountValidationMessage(tx, chainConfig, signingHash, deploymentUsedGas) - 
resultAccountValidation, err := ApplyMessage(evm, accountValidationMsg, gp) + accountValidationMsg, err := prepareAccountValidationMessage(aatx, signingHash) if err != nil { return nil, newValidationPhaseError(err, nil, nil) } + resultAccountValidation := CallFrame(st, &AA_ENTRY_POINT, aatx.Sender, accountValidationMsg, aatx.ValidationGasLimit-deploymentUsedGas) if resultAccountValidation.Failed() { return nil, newValidationPhaseError( resultAccountValidation.Err, @@ -353,45 +353,49 @@ func ApplyRip7560ValidationPhases( return nil, newValidationPhaseError(err, nil, nil) } - paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, vpe := applyPaymasterValidationFrame(epc, tx, chainConfig, signingHash, evm, gp, statedb, header) - if vpe != nil { - return nil, vpe - } - - vpr := &ValidationPhaseResult{} - vpr.Tx = tx - vpr.TxHash = tx.Hash() - vpr.PreCharge = preCharge - vpr.EffectiveGasPrice = gasPriceUint256 - vpr.PaymasterContext = paymasterContext - vpr.DeploymentUsedGas = deploymentUsedGas - vpr.NonceManagerUsedGas = nonceManagerUsedGas - vpr.ValidationUsedGas = resultAccountValidation.UsedGas - vpr.PmValidationUsedGas = pmValidationUsedGas - vpr.SenderValidAfter = aad.ValidAfter.Uint64() - vpr.SenderValidUntil = aad.ValidUntil.Uint64() - vpr.PmValidAfter = pmValidAfter - vpr.PmValidUntil = pmValidUntil + paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(st, epc, tx, signingHash, header) + if err != nil { + return nil, err + } + + callDataUsedGas, err := aatx.CallDataGasCost() + if err != nil { + return nil, err + } + vpr := &ValidationPhaseResult{ + Tx: tx, + TxHash: tx.Hash(), + PreCharge: preCharge, + EffectiveGasPrice: gasPriceUint256, + PaymasterContext: paymasterContext, + CallDataUsedGas: callDataUsedGas, + DeploymentUsedGas: deploymentUsedGas, + NonceManagerUsedGas: nonceManagerUsedGas, + ValidationUsedGas: resultAccountValidation.UsedGas, + PmValidationUsedGas: pmValidationUsedGas, + 
SenderValidAfter: aad.ValidAfter.Uint64(), + SenderValidUntil: aad.ValidUntil.Uint64(), + PmValidAfter: pmValidAfter, + PmValidUntil: pmValidUntil, + } statedb.Finalise(true) return vpr, nil } -func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *types.Header) ([]byte, uint64, uint64, uint64, error) { +func applyPaymasterValidationFrame(st *StateTransition, epc *EntryPointCall, tx *types.Transaction, signingHash common.Hash, header *types.Header) ([]byte, uint64, uint64, uint64, error) { /*** Paymaster Validation Frame ***/ aatx := tx.Rip7560TransactionData() var pmValidationUsedGas uint64 - paymasterMsg, err := preparePaymasterValidationMessage(tx, chainConfig, signingHash) + paymasterMsg, err := preparePaymasterValidationMessage(aatx, signingHash) if err != nil { return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) } if paymasterMsg == nil { return nil, 0, 0, 0, nil } - resultPm, err := ApplyMessage(evm, paymasterMsg, gp) - if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) - } + resultPm := CallFrame(st, &AA_ENTRY_POINT, aatx.Paymaster, paymasterMsg, aatx.PaymasterValidationGasLimit) + if resultPm.Failed() { return nil, 0, 0, 0, newValidationPhaseError( resultPm.Err, @@ -411,70 +415,61 @@ func applyPaymasterValidationFrame(epc *EntryPointCall, tx *types.Transaction, c return apd.Context, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } -func applyPaymasterPostOpFrame(vpr *ValidationPhaseResult, executionResult *ExecutionResult, evm *vm.EVM, gp *GasPool) (*ExecutionResult, error) { +func applyPaymasterPostOpFrame(st *StateTransition, aatx *types.Rip7560AccountAbstractionTx, vpr *ValidationPhaseResult, success bool, gasUsed uint64) *ExecutionResult { var paymasterPostOpResult *ExecutionResult - paymasterPostOpMsg, err := preparePostOpMessage(vpr, 
evm.ChainConfig(), executionResult) - if err != nil { - return nil, err - } - paymasterPostOpResult, err = ApplyMessage(evm, paymasterPostOpMsg, gp) - if err != nil { - return nil, err - } - return paymasterPostOpResult, nil + paymasterPostOpMsg := preparePostOpMessage(vpr, success, gasUsed) + paymasterPostOpResult = CallFrame(st, &AA_ENTRY_POINT, aatx.Paymaster, paymasterPostOpMsg, aatx.PostOpGas) + return paymasterPostOpResult } func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhaseResult, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, cfg vm.Config) (*types.Receipt, error) { - beforeExecSnapshotId := statedb.Snapshot() blockContext := NewEVMBlockContext(header, bc, author) - message, err := TransactionToMessage(vpr.Tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) - txContext := NewEVMTxContext(message) - txContext.Origin = *vpr.Tx.Rip7560TransactionData().Sender + aatx := vpr.Tx.Rip7560TransactionData() + sender := aatx.Sender + txContext := vm.TxContext{ + Origin: *sender, + GasPrice: vpr.EffectiveGasPrice.ToBig(), + } + txContext.Origin = *aatx.Sender evm := vm.NewEVM(blockContext, txContext, statedb, config, cfg) + st := NewStateTransition(evm, nil, gp) + st.initialGas = math.MaxUint64 + st.gasRemaining = math.MaxUint64 - accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx, evm.ChainConfig()) - executionResult, err := ApplyMessage(evm, accountExecutionMsg, gp) - executionAL := statedb.AccessListCopy() - if err != nil { - return nil, err - } - beforePostSnapshotId := statedb.Snapshot() - var paymasterPostOpResult *ExecutionResult + accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx) + beforeExecSnapshotId := statedb.Snapshot() + executionResult := CallFrame(st, &AA_ENTRY_POINT, sender, accountExecutionMsg, aatx.Gas) + executionStatus := types.ReceiptStatusSuccessful + if executionResult.Failed() { + executionStatus = 
types.ReceiptStatusFailed + } + executionGasPenalty := (aatx.Gas - executionResult.UsedGas) * AA_GAS_PENALTY_PCT / 100 + + gasUsed := vpr.ValidationUsedGas + + vpr.NonceManagerUsedGas + + vpr.DeploymentUsedGas + + vpr.PmValidationUsedGas + + vpr.CallDataUsedGas + + executionResult.UsedGas + + executionGasPenalty + + var postOpGasUsed uint64 if len(vpr.PaymasterContext) != 0 { - paymasterPostOpResult, err = applyPaymasterPostOpFrame(vpr, executionResult, evm, gp) - } - - // PostOp failed, reverting execution changes - if paymasterPostOpResult != nil && paymasterPostOpResult.Err != nil { - statedb.RevertToSnapshot(beforePostSnapshotId) - // Workaround a bug in snapshot/revert - can't be called after multiple ApplyMessage() calls - statedb.SetAccessList(executionAL) - statedb.RevertToSnapshot(beforeExecSnapshotId) - } - - if err != nil { - return nil, err - } - - gasUsed := - vpr.ValidationUsedGas + - vpr.NonceManagerUsedGas + - vpr.DeploymentUsedGas + - vpr.PmValidationUsedGas + - executionResult.UsedGas - if paymasterPostOpResult != nil { - gasUsed += - paymasterPostOpResult.UsedGas + paymasterPostOpResult := applyPaymasterPostOpFrame(st, aatx, vpr, !executionResult.Failed(), gasUsed) + postOpGasUsed = paymasterPostOpResult.UsedGas + // PostOp failed, reverting execution changes + if paymasterPostOpResult.Err != nil { + statedb.RevertToSnapshot(beforeExecSnapshotId) + executionStatus = types.ReceiptStatusFailed + } + postOpGasPenalty := (aatx.PostOpGas - postOpGasUsed) * AA_GAS_PENALTY_PCT / 100 + gasUsed += postOpGasUsed + postOpGasPenalty } receipt := &types.Receipt{Type: vpr.Tx.Type(), TxHash: vpr.Tx.Hash(), GasUsed: gasUsed, CumulativeGasUsed: gasUsed} - if executionResult.Failed() || (paymasterPostOpResult != nil && paymasterPostOpResult.Failed()) { - receipt.Status = types.ReceiptStatusFailed - } else { - receipt.Status = types.ReceiptStatusSuccessful - } + receipt.Status = executionStatus refundPayer(vpr, statedb, gasUsed) @@ -484,113 +479,27 @@ func 
ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.TransactionIndex = uint(vpr.TxIndex) // other fields are filled in DeriveFields (all tx, block fields, and updating CumulativeGasUsed - return receipt, err -} - -func prepareDeployerMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { - tx := baseTx.Rip7560TransactionData() - if tx.Deployer == nil || tx.Deployer.Cmp(common.Address{}) == 0 { - return nil - } - return &Message{ - From: AA_SENDER_CREATOR, - To: tx.Deployer, - Value: big.NewInt(0), - GasLimit: tx.ValidationGasLimit, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: tx.DeployerData, - AccessList: nil, - SkipAccountChecks: true, - IsRip7560Frame: true, - } + return receipt, nil } -func prepareAccountValidationMessage(baseTx *types.Transaction, chainConfig *params.ChainConfig, signingHash common.Hash, deploymentUsedGas uint64) (*Message, error) { - tx := baseTx.Rip7560TransactionData() - data, err := abiEncodeValidateTransaction(tx, signingHash) - if err != nil { - return nil, err - } - return &Message{ - From: AA_ENTRY_POINT, - To: tx.Sender, - Value: big.NewInt(0), - GasLimit: tx.ValidationGasLimit - deploymentUsedGas, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: data, - AccessList: nil, - SkipAccountChecks: true, - IsRip7560Frame: true, - }, nil +func prepareAccountValidationMessage(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { + return abiEncodeValidateTransaction(tx, signingHash) } -func preparePaymasterValidationMessage(baseTx *types.Transaction, config *params.ChainConfig, signingHash common.Hash) (*Message, error) { - tx := baseTx.Rip7560TransactionData() +func preparePaymasterValidationMessage(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { if tx.Paymaster == nil || 
tx.Paymaster.Cmp(common.Address{}) == 0 { return nil, nil } - data, err := abiEncodeValidatePaymasterTransaction(tx, signingHash) - if err != nil { - return nil, err - } - return &Message{ - From: AA_ENTRY_POINT, - To: tx.Paymaster, - Value: big.NewInt(0), - GasLimit: tx.PaymasterValidationGasLimit, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: data, - AccessList: nil, - SkipAccountChecks: true, - IsRip7560Frame: true, - }, nil + return abiEncodeValidatePaymasterTransaction(tx, signingHash) } -func prepareAccountExecutionMessage(baseTx *types.Transaction, config *params.ChainConfig) *Message { +func prepareAccountExecutionMessage(baseTx *types.Transaction) []byte { tx := baseTx.Rip7560TransactionData() - return &Message{ - From: AA_ENTRY_POINT, - To: tx.Sender, - Value: big.NewInt(0), - GasLimit: tx.Gas, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: tx.ExecutionData, - AccessList: nil, - SkipAccountChecks: true, - IsRip7560Frame: true, - } + return tx.ExecutionData } -func preparePostOpMessage(vpr *ValidationPhaseResult, chainConfig *params.ChainConfig, executionResult *ExecutionResult) (*Message, error) { - if len(vpr.PaymasterContext) == 0 { - return nil, nil - } - tx := vpr.Tx.Rip7560TransactionData() - postOpData, err := abiEncodePostPaymasterTransaction(vpr.PaymasterContext) - if err != nil { - return nil, err - } - return &Message{ - From: AA_ENTRY_POINT, - To: tx.Paymaster, - Value: big.NewInt(0), - GasLimit: tx.PaymasterValidationGasLimit - executionResult.UsedGas, - GasPrice: tx.GasFeeCap, - GasFeeCap: tx.GasFeeCap, - GasTipCap: tx.GasTipCap, - Data: postOpData, - AccessList: tx.AccessList, - SkipAccountChecks: true, - IsRip7560Frame: true, - }, nil +func preparePostOpMessage(vpr *ValidationPhaseResult, success bool, gasUsed uint64) []byte { + return abiEncodePostPaymasterTransaction(success, gasUsed, vpr.PaymasterContext) } func validateAccountEntryPointCall(epc 
*EntryPointCall, sender *common.Address) (*AcceptAccountData, error) { @@ -600,13 +509,10 @@ func validateAccountEntryPointCall(epc *EntryPointCall, sender *common.Address) if epc.Input == nil { return nil, errors.New("account validation did not call the EntryPoint 'acceptAccount' callback") } - if len(epc.Input) != 68 { - return nil, errors.New("invalid account return data length") - } if epc.From.Cmp(*sender) != 0 { return nil, errors.New("invalid call to EntryPoint contract from a wrong account address") } - return abiDecodeAcceptAccount(epc.Input) + return abiDecodeAcceptAccount(epc.Input, false) } func validatePaymasterEntryPointCall(epc *EntryPointCall, paymaster *common.Address) (*AcceptPaymasterData, error) { @@ -617,13 +523,10 @@ func validatePaymasterEntryPointCall(epc *EntryPointCall, paymaster *common.Addr return nil, errors.New("paymaster validation did not call the EntryPoint 'acceptPaymaster' callback") } - if len(epc.Input) < 100 { - return nil, errors.New("invalid paymaster callback data length") - } if epc.From.Cmp(*paymaster) != 0 { return nil, errors.New("invalid call to EntryPoint contract from a wrong paymaster address") } - apd, err := abiDecodeAcceptPaymaster(epc.Input) + apd, err := abiDecodeAcceptPaymaster(epc.Input, false) if err != nil { return nil, err } diff --git a/core/state_transition.go b/core/state_transition.go index 5329386e131b..3bb91c2f77b5 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -149,7 +149,6 @@ type Message struct { // account nonce in state. It also disables checking that the sender is an EOA. // This field will be set to true for operations like RPC eth_call. SkipAccountChecks bool - IsRip7560Frame bool } // TransactionToMessage converts a transaction into a Message. 
@@ -361,15 +360,6 @@ func (st *StateTransition) preCheck() error { } } } - - // no need to "buy gus" for individual frames - // there is a single shared gas pre-charge - if st.msg.IsRip7560Frame { - st.gasRemaining += st.msg.GasLimit - st.initialGas = st.msg.GasLimit - return nil - } - return st.buyGas() } @@ -407,7 +397,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsShanghai, msg.IsRip7560Frame) + gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsShanghai) if err != nil { return nil, err } @@ -454,9 +444,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, value) } else { // Increment the nonce for the next transaction - if msg.From != AA_SENDER_CREATOR && msg.From != AA_ENTRY_POINT { - st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) - } + st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, value) } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 772afa909fff..7887292a017c 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -18,8 +18,10 @@ package types import ( "bytes" + "fmt" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "math/big" ) @@ -109,6 +111,50 @@ func (tx *Rip7560AccountAbstractionTx) value() *big.Int { return big.NewI func (tx *Rip7560AccountAbstractionTx) nonce() uint64 { return tx.Nonce } func (tx *Rip7560AccountAbstractionTx) to() *common.Address { return nil } +func (tx 
*Rip7560AccountAbstractionTx) GasPayer() *common.Address { + if tx.Paymaster != nil && tx.Paymaster.Cmp(common.Address{}) != 0 { + return tx.Paymaster + } + return tx.Sender +} + +func sumGas(vals ...uint64) (uint64, error) { + var sum uint64 + for _, val := range vals { + if val > 1<<62 { + return 0, fmt.Errorf("invalid gas values") + } + sum += val + } + return sum, nil +} + +func callDataCost(data []byte) uint64 { + z := uint64(0) + for i := 0; i < len(data); i++ { + if data[i] == 0 { + z++ + } + } + nz := uint64(len(data)) - z + return nz*params.TxDataNonZeroGasEIP2028 + z*params.TxDataZeroGas +} + +func (tx *Rip7560AccountAbstractionTx) CallDataGasCost() (uint64, error) { + return sumGas(callDataCost(tx.DeployerData), callDataCost(tx.ExecutionData), callDataCost(tx.PaymasterData)) +} + +func (tx *Rip7560AccountAbstractionTx) TotalGasLimit() (uint64, error) { + callDataGasCost, err := tx.CallDataGasCost() + if err != nil { + return 0, err + } + return sumGas( + tx.Gas, tx.ValidationGasLimit, tx.PaymasterValidationGasLimit, tx.PostOpGas, + callDataGasCost, + ) +} + // IsRip7712Nonce returns true if the transaction uses an RIP-7712 two-dimensional nonce func (tx *Rip7560AccountAbstractionTx) IsRip7712Nonce() bool { return tx.NonceKey != nil && tx.NonceKey.Cmp(big.NewInt(0)) == 1 @@ -134,16 +180,16 @@ func (tx *Rip7560AccountAbstractionTx) setSignatureValues(chainID, v, r, s *big. 
} // encode the subtype byte and the payload-bearing bytes of the RIP-7560 transaction -func (t *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { +func (tx *Rip7560AccountAbstractionTx) encode(b *bytes.Buffer) error { zeroAddress := common.Address{} - tx := t.copy().(*Rip7560AccountAbstractionTx) - if tx.Paymaster != nil && zeroAddress.Cmp(*tx.Paymaster) == 0 { - tx.Paymaster = nil + txCopy := tx.copy().(*Rip7560AccountAbstractionTx) + if txCopy.Paymaster != nil && zeroAddress.Cmp(*txCopy.Paymaster) == 0 { + txCopy.Paymaster = nil } - if tx.Deployer != nil && zeroAddress.Cmp(*tx.Deployer) == 0 { - tx.Deployer = nil + if txCopy.Deployer != nil && zeroAddress.Cmp(*txCopy.Deployer) == 0 { + txCopy.Deployer = nil } - return rlp.Encode(b, tx) + return rlp.Encode(b, txCopy) } // decode the payload-bearing bytes of the encoded RIP-7560 transaction payload From 784d368a15ee5ff5187a675d296a7e3ef973c832 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 11 Sep 2024 16:16:06 +0200 Subject: [PATCH 60/73] AA-344: During block building simply skip invalid RIP-7560 transactions (#35) * AA-344: (WIP) During block building simply skip invalid RIP-7560 transactions * Create 'eth_getRip7560TransactionDebugInfo' API to observe late invalidation * Remove unnecessary override --- core/blockchain.go | 26 ++++++++ core/state_processor.go | 2 +- core/state_processor_rip7560.go | 101 ++++++++++++++++++++++++-------- core/types/tx_rip7560.go | 7 +++ eth/api_backend_rip7560.go | 19 ++++++ internal/ethapi/backend.go | 6 ++ internal/ethapi/rip7560api.go | 4 ++ miner/worker.go | 3 +- 8 files changed, 141 insertions(+), 27 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 7c8ab3abc44a..956bee2260a4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -258,6 +258,9 @@ type BlockChain struct { forker *ForkChoice vmConfig vm.Config logger *tracing.Hooks + + // note: added to assist debugging in case of a failed validation after bundler performed second 
validation + rip7560TransactionDebugInfos []*types.Rip7560TransactionDebugInfo } // NewBlockChain returns a fully initialised block chain using information @@ -2535,3 +2538,26 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } + +// GetRip7560TransactionDebugInfo debug method for RIP-7560 +func (bc *BlockChain) GetRip7560TransactionDebugInfo(hash common.Hash) *types.Rip7560TransactionDebugInfo { + for i := 0; i < len(bc.rip7560TransactionDebugInfos); i++ { + info := bc.rip7560TransactionDebugInfos[i] + if info.TxHash.Cmp(hash) == 0 { + return info + } + } + return nil +} + +// SetRip7560TransactionDebugInfo debug method for RIP-7560 +func (bc *BlockChain) SetRip7560TransactionDebugInfo(infos []*types.Rip7560TransactionDebugInfo) { + if infos == nil { + return + } + // TODO: use LRU cache or any other sane way to limit the + if len(bc.rip7560TransactionDebugInfos) > 30 { + bc.rip7560TransactionDebugInfos = make([]*types.Rip7560TransactionDebugInfo, 0) + } + bc.rip7560TransactionDebugInfos = append(bc.rip7560TransactionDebugInfos, infos...) +} diff --git a/core/state_processor.go b/core/state_processor.go index ee6f5f77f939..6be7f5009707 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -85,7 +85,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if tx.Type() == types.Rip7560Type { // HandleRip7560Transactions accepts a transaction array and in the future bundle handling will need this tmpTxs := [1]*types.Transaction{tx} - _, validatedTxsReceipts, validateTxsLogs, err := HandleRip7560Transactions(tmpTxs[:], 0, statedb, &context.Coinbase, header, gp, p.config, p.bc, cfg) + _, validatedTxsReceipts, _, validateTxsLogs, err := HandleRip7560Transactions(tmpTxs[:], 0, statedb, &context.Coinbase, header, gp, p.config, p.bc, cfg, false) receipts = append(receipts, validatedTxsReceipts...) 
allLogs = append(allLogs, validateTxsLogs...) if err != nil { diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 96ff30296eb4..489745fe40b2 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" "math/big" @@ -46,10 +47,9 @@ type ValidationPhaseResult struct { type ValidationPhaseError struct { error reason string // revert reason hex encoded -} -func (v *ValidationPhaseError) Error() string { - return v.error.Error() + revertEntityName *string + frameReverted bool } func (v *ValidationPhaseError) ErrorData() interface{} { @@ -61,6 +61,7 @@ func newValidationPhaseError( innerErr error, revertReason []byte, revertEntityName *string, + frameReverted bool, ) *ValidationPhaseError { var errorMessage string contractSubst := "" @@ -86,30 +87,58 @@ func newValidationPhaseError( return &ValidationPhaseError{ error: err, reason: hexutil.Encode(revertReason), + + frameReverted: frameReverted, + revertEntityName: revertEntityName, } } -// HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions and return -// the number of handled transactions -// the transactions array must start with the RIP-7560 transaction -func HandleRip7560Transactions(transactions []*types.Transaction, index int, statedb *state.StateDB, coinbase *common.Address, header *types.Header, gp *GasPool, chainConfig *params.ChainConfig, bc ChainContext, cfg vm.Config) ([]*types.Transaction, types.Receipts, []*types.Log, error) { +// HandleRip7560Transactions apply state changes of all sequential RIP-7560 transactions. +// During block building the 'skipInvalid' flag is set to False, and invalid transactions are silently ignored. 
+// Returns an array of included transactions. +func HandleRip7560Transactions( + transactions []*types.Transaction, + index int, + statedb *state.StateDB, + coinbase *common.Address, + header *types.Header, + gp *GasPool, + chainConfig *params.ChainConfig, + bc ChainContext, + cfg vm.Config, + skipInvalid bool, +) ([]*types.Transaction, types.Receipts, []*types.Rip7560TransactionDebugInfo, []*types.Log, error) { validatedTransactions := make([]*types.Transaction, 0) receipts := make([]*types.Receipt, 0) allLogs := make([]*types.Log, 0) - iTransactions, iReceipts, iLogs, err := handleRip7560Transactions(transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg) + iTransactions, iReceipts, validationFailureReceipts, iLogs, err := handleRip7560Transactions( + transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg, skipInvalid, + ) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } validatedTransactions = append(validatedTransactions, iTransactions...) receipts = append(receipts, iReceipts...) allLogs = append(allLogs, iLogs...) 
- return validatedTransactions, receipts, allLogs, nil + return validatedTransactions, receipts, validationFailureReceipts, allLogs, nil } -func handleRip7560Transactions(transactions []*types.Transaction, index int, statedb *state.StateDB, coinbase *common.Address, header *types.Header, gp *GasPool, chainConfig *params.ChainConfig, bc ChainContext, cfg vm.Config) ([]*types.Transaction, types.Receipts, []*types.Log, error) { +func handleRip7560Transactions( + transactions []*types.Transaction, + index int, + statedb *state.StateDB, + coinbase *common.Address, + header *types.Header, + gp *GasPool, + chainConfig *params.ChainConfig, + bc ChainContext, + cfg vm.Config, + skipInvalid bool, +) ([]*types.Transaction, types.Receipts, []*types.Rip7560TransactionDebugInfo, []*types.Log, error) { validationPhaseResults := make([]*ValidationPhaseResult, 0) validatedTransactions := make([]*types.Transaction, 0) + validationFailureInfos := make([]*types.Rip7560TransactionDebugInfo, 0) receipts := make([]*types.Receipt, 0) allLogs := make([]*types.Log, 0) for i, tx := range transactions[index:] { @@ -118,10 +147,28 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta } statedb.SetTxContext(tx.Hash(), index+i) - + beforeValidationSnapshotId := statedb.Snapshot() vpr, vpe := ApplyRip7560ValidationPhases(chainConfig, bc, coinbase, gp, statedb, header, tx, cfg) if vpe != nil { - return nil, nil, nil, vpe + if skipInvalid { + log.Error("Validation failed during block building, should not happen, skipping transaction", "error", vpe) + debugInfo := &types.Rip7560TransactionDebugInfo{ + TxHash: tx.Hash(), + RevertData: vpe.Error(), + FrameReverted: false, + RevertEntityName: "n/a", + } + validationFailureInfos = append(validationFailureInfos, debugInfo) + var vpeCast *ValidationPhaseError + if errors.As(vpe, &vpeCast) { + debugInfo.RevertData = vpeCast.reason + debugInfo.FrameReverted = vpeCast.frameReverted + debugInfo.RevertEntityName = 
*vpeCast.revertEntityName + } + statedb.RevertToSnapshot(beforeValidationSnapshotId) + continue + } + return nil, nil, nil, nil, vpe } validationPhaseResults = append(validationPhaseResults, vpr) validatedTransactions = append(validatedTransactions, tx) @@ -136,14 +183,14 @@ func handleRip7560Transactions(transactions []*types.Transaction, index int, sta receipt, err := ApplyRip7560ExecutionPhase(chainConfig, vpr, bc, coinbase, gp, statedb, header, cfg) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } statedb.Finalise(true) receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) } - return validatedTransactions, receipts, allLogs, nil + return validatedTransactions, receipts, validationFailureInfos, allLogs, nil } func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB, gasPrice *uint256.Int) (uint64, *uint256.Int, error) { @@ -201,7 +248,7 @@ func CheckNonceRip7560(st *StateTransition, tx *types.Rip7560AccountAbstractionT func performNonceCheckFrameRip7712(st *StateTransition, tx *types.Rip7560AccountAbstractionTx) (uint64, error) { if !st.evm.ChainConfig().IsRIP7712(st.evm.Context.BlockNumber) { - return 0, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil) + return 0, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil, false) } nonceManagerMessageData := prepareNonceManagerMessage(tx) resultNonceManager := CallFrame(st, &AA_ENTRY_POINT, &AA_NONCE_MANAGER, nonceManagerMessageData, st.gasRemaining) @@ -210,6 +257,7 @@ func performNonceCheckFrameRip7712(st *StateTransition, tx *types.Rip7560Account fmt.Errorf("RIP-7712 nonce validation failed: %w", resultNonceManager.Err), resultNonceManager.ReturnData, ptr("NonceManager"), + true, ) } return resultNonceManager.UsedGas, nil @@ -251,7 +299,7 @@ func ApplyRip7560ValidationPhases( gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) if err != nil { - 
return nil, newValidationPhaseError(err, nil, nil) + return nil, newValidationPhaseError(err, nil, nil, false) } blockContext := NewEVMBlockContext(header, bc, author) @@ -301,6 +349,7 @@ func ApplyRip7560ValidationPhases( resultDeployer.Err, resultDeployer.ReturnData, ptr("deployer"), + true, ) } if statedb.GetCodeSize(*sender) == 0 { @@ -308,7 +357,7 @@ func ApplyRip7560ValidationPhases( fmt.Errorf( "sender not deployed by factory, sender:%s factory:%s", sender.String(), aatx.Deployer.String(), - ), nil, nil) + ), nil, nil, false) } deploymentUsedGas = resultDeployer.UsedGas } else { @@ -316,7 +365,7 @@ func ApplyRip7560ValidationPhases( return nil, newValidationPhaseError( fmt.Errorf( "account is not deployed and no factory is specified, account:%s", sender.String(), - ), nil, nil) + ), nil, nil, false) } if !aatx.IsRip7712Nonce() { statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) @@ -328,7 +377,7 @@ func ApplyRip7560ValidationPhases( signingHash := signer.Hash(tx) accountValidationMsg, err := prepareAccountValidationMessage(aatx, signingHash) if err != nil { - return nil, newValidationPhaseError(err, nil, nil) + return nil, newValidationPhaseError(err, nil, nil, false) } resultAccountValidation := CallFrame(st, &AA_ENTRY_POINT, aatx.Sender, accountValidationMsg, aatx.ValidationGasLimit-deploymentUsedGas) if resultAccountValidation.Failed() { @@ -336,11 +385,12 @@ func ApplyRip7560ValidationPhases( resultAccountValidation.Err, resultAccountValidation.ReturnData, ptr("account"), + true, ) } aad, err := validateAccountEntryPointCall(epc, aatx.Sender) if err != nil { - return nil, newValidationPhaseError(err, nil, nil) + return nil, newValidationPhaseError(err, nil, nil, false) } // clear the EntryPoint calls array after parsing @@ -350,7 +400,7 @@ func ApplyRip7560ValidationPhases( err = validateValidityTimeRange(header.Time, aad.ValidAfter.Uint64(), aad.ValidUntil.Uint64()) if err != nil { - return nil, newValidationPhaseError(err, nil, nil) + return nil, 
newValidationPhaseError(err, nil, nil, false) } paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := applyPaymasterValidationFrame(st, epc, tx, signingHash, header) @@ -389,7 +439,7 @@ func applyPaymasterValidationFrame(st *StateTransition, epc *EntryPointCall, tx var pmValidationUsedGas uint64 paymasterMsg, err := preparePaymasterValidationMessage(aatx, signingHash) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) } if paymasterMsg == nil { return nil, 0, 0, 0, nil @@ -401,16 +451,17 @@ func applyPaymasterValidationFrame(st *StateTransition, epc *EntryPointCall, tx resultPm.Err, resultPm.ReturnData, ptr("paymaster"), + true, ) } pmValidationUsedGas = resultPm.UsedGas apd, err := validatePaymasterEntryPointCall(epc, aatx.Paymaster) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) } err = validateValidityTimeRange(header.Time, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64()) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil) + return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) } return apd.Context, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 7887292a017c..35fa241adff1 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -293,3 +293,10 @@ type BundleReceipt struct { GasPaidPriority *big.Int BlockTimestamp uint64 } + +type Rip7560TransactionDebugInfo struct { + TxHash common.Hash + RevertEntityName string + FrameReverted bool // true if reverted, false if did not call EntryPoint callback + RevertData string +} diff --git a/eth/api_backend_rip7560.go b/eth/api_backend_rip7560.go index 359e8e7166cf..e2977dcfa55d 100644 --- a/eth/api_backend_rip7560.go +++ b/eth/api_backend_rip7560.go @@ -17,3 
+17,22 @@ func (b *EthAPIBackend) SubmitRip7560Bundle(bundle *types.ExternallyReceivedBund func (b *EthAPIBackend) GetRip7560BundleStatus(ctx context.Context, hash common.Hash) (*types.BundleReceipt, error) { return b.eth.txPool.GetRip7560BundleStatus(hash) } + +// GetRip7560TransactionDebugInfo debug method for RIP-7560 +func (b *EthAPIBackend) GetRip7560TransactionDebugInfo(hash common.Hash) (map[string]interface{}, error) { + info := b.eth.blockchain.GetRip7560TransactionDebugInfo(hash) + if info == nil { + return nil, nil + } + return map[string]interface{}{ + "transactionHash": hash, + "revertEntityName": info.RevertEntityName, + "revertData": info.RevertData, + "frameReverted": info.FrameReverted, + }, nil +} + +// SetRip7560TransactionDebugInfo debug method for RIP-7560 +func (b *EthAPIBackend) SetRip7560TransactionDebugInfo(infos []*types.Rip7560TransactionDebugInfo) { + b.eth.blockchain.SetRip7560TransactionDebugInfo(infos) +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 35c4a9041975..3665a4b9605a 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -99,8 +99,14 @@ type Backend interface { ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) // RIP-7560 specific functions + SubmitRip7560Bundle(bundle *types.ExternallyReceivedBundle) error GetRip7560BundleStatus(ctx context.Context, hash common.Hash) (*types.BundleReceipt, error) + + // RIP-7560 debug + + GetRip7560TransactionDebugInfo(common.Hash) (map[string]interface{}, error) + SetRip7560TransactionDebugInfo(infos []*types.Rip7560TransactionDebugInfo) } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/internal/ethapi/rip7560api.go b/internal/ethapi/rip7560api.go index 4043cfc00c6b..5adb14ab4d21 100644 --- a/internal/ethapi/rip7560api.go +++ b/internal/ethapi/rip7560api.go @@ -37,6 +37,10 @@ func (s *TransactionAPI) GetRip7560BundleStatus(ctx context.Context, hash common return bundleStats, err } +func (s *TransactionAPI) 
GetRip7560TransactionDebugInfo(hash common.Hash) (map[string]interface{}, error) { + return s.b.GetRip7560TransactionDebugInfo(hash) +} + // CalculateBundleHash // TODO: If this code is indeed necessary, keep it in utils; better - remove altogether. func CalculateBundleHash(txs []*types.Transaction) common.Hash { diff --git a/miner/worker.go b/miner/worker.go index c63ad869c3ee..e299d5ef3741 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -381,7 +381,8 @@ func (miner *Miner) commitRip7560TransactionsBundle(env *environment, txs *types env.gasPool = new(core.GasPool).AddGas(gasLimit) } - validatedTxs, receipts, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}) + validatedTxs, receipts, validationFailureInfos, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}, true) + miner.chain.SetRip7560TransactionDebugInfo(validationFailureInfos) if err != nil { return err } From 429977f98e395c7f72fea4e8d5c22363e11aac44 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 12 Sep 2024 14:34:42 +0200 Subject: [PATCH 61/73] AA-382: Inject system events (#34) * WIP: Inject system events * WIP: Encode the event correctly and pass relevant data * Implement revert reason system events * Fix crash * Fix unhandled error * Remove unnecessary gas fields, expose failure type in status * Add 'ExecutionStatusExecutionAndPostOpFailure' status * Add 'RIP7560AccountDeployed' event; add 'nonceKey' event parameter; cleanup --- core/rip7560_abi.go | 93 ++++++++++++++++++++++ core/rip7560_abi_constants.go | 134 +++++++++++++++++++++++++++++++- core/state_processor_rip7560.go | 129 ++++++++++++++++++++++++++++-- 3 files changed, 348 insertions(+), 8 deletions(-) diff --git a/core/rip7560_abi.go b/core/rip7560_abi.go index a756b9204c59..4d0e8b1f4773 100644 --- a/core/rip7560_abi.go
+++ b/core/rip7560_abi.go @@ -96,3 +96,96 @@ func abiDecodeAcceptPaymaster(input []byte, allowSigFail bool) (*AcceptPaymaster } return acceptPaymasterData, err } + +func abiEncodeRIP7560TransactionEvent( + aatx *types.Rip7560AccountAbstractionTx, + executionStatus uint64, +) (topics []common.Hash, data []byte, error error) { + id := Rip7560Abi.Events["RIP7560TransactionEvent"].ID + paymaster := aatx.Paymaster + if paymaster == nil { + paymaster = &common.Address{} + } + deployer := aatx.Deployer + if deployer == nil { + deployer = &common.Address{} + } + inputs := Rip7560Abi.Events["RIP7560TransactionEvent"].Inputs + data, error = inputs.NonIndexed().Pack( + aatx.NonceKey, + big.NewInt(int64(aatx.Nonce)), + big.NewInt(int64(executionStatus)), + ) + if error != nil { + return nil, nil, error + } + topics = []common.Hash{id, {}, {}} + topics[1] = [32]byte(common.LeftPadBytes(aatx.Sender.Bytes()[:], 32)) + topics[2] = [32]byte(common.LeftPadBytes(paymaster.Bytes()[:], 32)) + return topics, data, nil +} + +func abiEncodeRIP7560AccountDeployedEvent( + aatx *types.Rip7560AccountAbstractionTx, +) (topics []common.Hash, data []byte, error error) { + id := Rip7560Abi.Events["RIP7560AccountDeployed"].ID + paymaster := aatx.Paymaster + if paymaster == nil { + paymaster = &common.Address{} + } + deployer := aatx.Deployer + if deployer == nil { + deployer = &common.Address{} + } + if error != nil { + return nil, nil, error + } + topics = []common.Hash{id, {}, {}, {}} + topics[1] = [32]byte(common.LeftPadBytes(aatx.Sender.Bytes()[:], 32)) + topics[2] = [32]byte(common.LeftPadBytes(paymaster.Bytes()[:], 32)) + topics[3] = [32]byte(common.LeftPadBytes(deployer.Bytes()[:], 32)) + return topics, make([]byte, 0), nil +} + +func abiEncodeRIP7560TransactionRevertReasonEvent( + aatx *types.Rip7560AccountAbstractionTx, + revertData []byte, +) (topics []common.Hash, data []byte, error error) { + id := Rip7560Abi.Events["RIP7560TransactionRevertReason"].ID + inputs := 
Rip7560Abi.Events["RIP7560TransactionRevertReason"].Inputs + data, error = inputs.NonIndexed().Pack( + aatx.NonceKey, + big.NewInt(int64(aatx.Nonce)), + revertData, + ) + if error != nil { + return nil, nil, error + } + topics = []common.Hash{id, {}} + topics[1] = [32]byte(common.LeftPadBytes(aatx.Sender.Bytes()[:], 32)) + return topics, data, nil +} + +func abiEncodeRIP7560TransactionPostOpRevertReasonEvent( + aatx *types.Rip7560AccountAbstractionTx, + revertData []byte, +) (topics []common.Hash, data []byte, error error) { + id := Rip7560Abi.Events["RIP7560TransactionPostOpRevertReason"].ID + paymaster := aatx.Paymaster + if paymaster == nil { + paymaster = &common.Address{} + } + inputs := Rip7560Abi.Events["RIP7560TransactionPostOpRevertReason"].Inputs + data, error = inputs.NonIndexed().Pack( + aatx.NonceKey, + big.NewInt(int64(aatx.Nonce)), + revertData, + ) + if error != nil { + return nil, nil, error + } + topics = []common.Hash{id, {}, {}} + topics[1] = [32]byte(common.LeftPadBytes(aatx.Sender.Bytes()[:], 32)) + topics[2] = [32]byte(common.LeftPadBytes(paymaster.Bytes()[:], 32)) + return topics, data, nil +} diff --git a/core/rip7560_abi_constants.go b/core/rip7560_abi_constants.go index e324361baad8..1a42e459d4d2 100644 --- a/core/rip7560_abi_constants.go +++ b/core/rip7560_abi_constants.go @@ -8,7 +8,7 @@ const Rip7560AbiVersion = 0 var AA_ENTRY_POINT = common.HexToAddress("0x0000000000000000000000000000000000007560") var AA_SENDER_CREATOR = common.HexToAddress("0x00000000000000000000000000000000ffff7560") -// always pay 10% of unused execution gas +// AA_GAS_PENALTY_PCT is always applied to unused execution and postOp gas limits const AA_GAS_PENALTY_PCT = 10 const Rip7560AbiJson = ` @@ -73,5 +73,135 @@ const Rip7560AbiJson = ` {"name": "validUntil","type": "uint256"}, {"name": "context","type": "bytes"} ] - } + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { 
+ "indexed": true, + "internalType": "address", + "name": "paymaster", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "nonceKey", + "type": "uint256" + }, +{ + "indexed": false, + "internalType": "uint256", + "name": "nonceSequence", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bool", + "name": "executionStatus", + "type": "uint256" + } + ], + "name": "RIP7560TransactionEvent", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "nonceKey", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "nonceSequence", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "revertReason", + "type": "bytes" + } + ], + "name": "RIP7560TransactionRevertReason", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "paymaster", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "nonceKey", + "type": "uint256" + }, +{ + "indexed": false, + "internalType": "uint256", + "name": "nonceSequence", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "revertReason", + "type": "bytes" + } + ], + "name": "RIP7560TransactionPostOpRevertReason", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "paymaster", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "deployer", + "type": "address" + } + ], + "name": "RIP7560AccountDeployed", + "type": "event" + } ]` 
diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 489745fe40b2..59ed3504958c 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -42,6 +42,13 @@ type ValidationPhaseResult struct { PmValidUntil uint64 } +const ( + ExecutionStatusSuccess = uint64(0) + ExecutionStatusExecutionFailure = uint64(1) + ExecutionStatusPostOpFailure = uint64(2) + ExecutionStatusExecutionAndPostOpFailure = uint64(3) +) + // ValidationPhaseError is an API error that encompasses an EVM revert with JSON error // code and a binary data blob. type ValidationPhaseError struct { @@ -491,9 +498,11 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase accountExecutionMsg := prepareAccountExecutionMessage(vpr.Tx) beforeExecSnapshotId := statedb.Snapshot() executionResult := CallFrame(st, &AA_ENTRY_POINT, sender, accountExecutionMsg, aatx.Gas) - executionStatus := types.ReceiptStatusSuccessful + receiptStatus := types.ReceiptStatusSuccessful + executionStatus := ExecutionStatusSuccess if executionResult.Failed() { - executionStatus = types.ReceiptStatusFailed + receiptStatus = types.ReceiptStatusFailed + executionStatus = ExecutionStatusExecutionFailure } executionGasPenalty := (aatx.Gas - executionResult.UsedGas) * AA_GAS_PENALTY_PCT / 100 @@ -506,21 +515,49 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase executionGasPenalty var postOpGasUsed uint64 + var paymasterPostOpResult *ExecutionResult if len(vpr.PaymasterContext) != 0 { - paymasterPostOpResult := applyPaymasterPostOpFrame(st, aatx, vpr, !executionResult.Failed(), gasUsed) + paymasterPostOpResult = applyPaymasterPostOpFrame(st, aatx, vpr, !executionResult.Failed(), gasUsed) postOpGasUsed = paymasterPostOpResult.UsedGas // PostOp failed, reverting execution changes - if paymasterPostOpResult.Err != nil { + if paymasterPostOpResult.Failed() { statedb.RevertToSnapshot(beforeExecSnapshotId) - executionStatus = 
types.ReceiptStatusFailed + receiptStatus = types.ReceiptStatusFailed + if executionStatus == ExecutionStatusExecutionFailure { + executionStatus = ExecutionStatusExecutionAndPostOpFailure + } + executionStatus = ExecutionStatusPostOpFailure } postOpGasPenalty := (aatx.PostOpGas - postOpGasUsed) * AA_GAS_PENALTY_PCT / 100 gasUsed += postOpGasUsed + postOpGasPenalty } + err = injectRIP7560TransactionEvent(aatx, executionStatus, header, statedb) + if err != nil { + return nil, err + } + if aatx.Deployer != nil { + err = injectRIP7560AccountDeployedEvent(aatx, header, statedb) + if err != nil { + return nil, err + } + } + if executionResult.Failed() { + err = injectRIP7560TransactionRevertReasonEvent(aatx, executionResult.ReturnData, header, statedb) + if err != nil { + return nil, err + } + } + if paymasterPostOpResult != nil && paymasterPostOpResult.Failed() { + err = injectRIP7560TransactionPostOpRevertReasonEvent(aatx, paymasterPostOpResult.ReturnData, header, statedb) + if err != nil { + return nil, err + } + } + receipt := &types.Receipt{Type: vpr.Tx.Type(), TxHash: vpr.Tx.Hash(), GasUsed: gasUsed, CumulativeGasUsed: gasUsed} - receipt.Status = executionStatus + receipt.Status = receiptStatus refundPayer(vpr, statedb, gasUsed) @@ -533,6 +570,86 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase return receipt, nil } +func injectRIP7560TransactionEvent( + aatx *types.Rip7560AccountAbstractionTx, + executionStatus uint64, + header *types.Header, + statedb *state.StateDB, +) error { + topics, data, err := abiEncodeRIP7560TransactionEvent(aatx, executionStatus) + if err != nil { + return err + } + err = injectEvent(topics, data, header.Number.Uint64(), statedb) + if err != nil { + return err + } + return nil +} + +func injectRIP7560AccountDeployedEvent( + aatx *types.Rip7560AccountAbstractionTx, + header *types.Header, + statedb *state.StateDB, +) error { + topics, data, err := abiEncodeRIP7560AccountDeployedEvent(aatx) + if err != 
nil { + return err + } + err = injectEvent(topics, data, header.Number.Uint64(), statedb) + if err != nil { + return err + } + return nil +} + +func injectRIP7560TransactionRevertReasonEvent( + aatx *types.Rip7560AccountAbstractionTx, + revertData []byte, + header *types.Header, + statedb *state.StateDB, +) error { + topics, data, err := abiEncodeRIP7560TransactionRevertReasonEvent(aatx, revertData) + if err != nil { + return err + } + err = injectEvent(topics, data, header.Number.Uint64(), statedb) + if err != nil { + return err + } + return nil +} + +func injectRIP7560TransactionPostOpRevertReasonEvent( + aatx *types.Rip7560AccountAbstractionTx, + revertData []byte, + header *types.Header, + statedb *state.StateDB, +) error { + topics, data, err := abiEncodeRIP7560TransactionPostOpRevertReasonEvent(aatx, revertData) + if err != nil { + return err + } + err = injectEvent(topics, data, header.Number.Uint64(), statedb) + if err != nil { + return err + } + return nil +} + +func injectEvent(topics []common.Hash, data []byte, blockNumber uint64, statedb *state.StateDB) error { + transactionLog := &types.Log{ + Address: AA_ENTRY_POINT, + Topics: topics, + Data: data, + // This is a non-consensus field, but assigned here because + // core/state doesn't know the current block number. 
+ BlockNumber: blockNumber, + } + statedb.AddLog(transactionLog) + return nil +} + func prepareAccountValidationMessage(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { return abiEncodeValidateTransaction(tx, signingHash) } From ae32a25869473d3a72f1c87745cdccf4dc4b8514 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Thu, 12 Sep 2024 15:21:03 +0200 Subject: [PATCH 62/73] AA-402: 'CallDataGasCost' is not a separate component of 'TotalGasLimit' (#36) * AA-402: (WIP) 'CallDataGasCost' is not a separate component of 'TotalGasLimit' * Remove --- core/types/tx_rip7560.go | 6 +----- params/protocol_params.go | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 35fa241adff1..11a7b35471d9 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -145,13 +145,9 @@ func (tx *Rip7560AccountAbstractionTx) CallDataGasCost() (uint64, error) { } func (tx *Rip7560AccountAbstractionTx) TotalGasLimit() (uint64, error) { - callDataGasCost, err := tx.CallDataGasCost() - if err != nil { - return 0, err - } return sumGas( + params.Rip7560TxGas, tx.Gas, tx.ValidationGasLimit, tx.PaymasterValidationGasLimit, tx.PostOpGas, - callDataGasCost, ) } diff --git a/params/protocol_params.go b/params/protocol_params.go index 8ffe8ee75db1..eb8241613eb9 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -34,6 +34,7 @@ const ( CallValueTransferGas uint64 = 9000 // Paid for CALL when the value transfer is non-zero. CallNewAccountGas uint64 = 25000 // Paid for CALL when the destination address didn't exist prior. TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions. + Rip7560TxGas uint64 = 15000 TxGasContractCreation uint64 = 53000 // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions. 
TxDataZeroGas uint64 = 4 // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions. QuadCoeffDiv uint64 = 512 // Divisor for the quadratic particle of the memory cost equation. From fe783a4810ee50a6a4fd996179965c8473b757cd Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 16 Sep 2024 12:56:23 +0200 Subject: [PATCH 63/73] Fix 'CumulativeGasUsed' not being calculated correctly (#38) * Fix 'CumulativeGasUsed' not being calculated correctly * call Prepare to initialize warm addresses * remove prints --------- Co-authored-by: Dror Tirosh --- core/state_processor.go | 2 +- core/state_processor_rip7560.go | 27 +++++++++++++++++++++++---- miner/worker.go | 2 +- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 6be7f5009707..cb3e27f39624 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -85,7 +85,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if tx.Type() == types.Rip7560Type { // HandleRip7560Transactions accepts a transaction array and in the future bundle handling will need this tmpTxs := [1]*types.Transaction{tx} - _, validatedTxsReceipts, _, validateTxsLogs, err := HandleRip7560Transactions(tmpTxs[:], 0, statedb, &context.Coinbase, header, gp, p.config, p.bc, cfg, false) + _, validatedTxsReceipts, _, validateTxsLogs, err := HandleRip7560Transactions(tmpTxs[:], 0, statedb, &context.Coinbase, header, gp, p.config, p.bc, cfg, false, usedGas) receipts = append(receipts, validatedTxsReceipts...) allLogs = append(allLogs, validateTxsLogs...) 
if err != nil { diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 59ed3504958c..730ba6bbedb3 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -114,13 +114,14 @@ func HandleRip7560Transactions( bc ChainContext, cfg vm.Config, skipInvalid bool, + usedGas *uint64, ) ([]*types.Transaction, types.Receipts, []*types.Rip7560TransactionDebugInfo, []*types.Log, error) { validatedTransactions := make([]*types.Transaction, 0) receipts := make([]*types.Receipt, 0) allLogs := make([]*types.Log, 0) iTransactions, iReceipts, validationFailureReceipts, iLogs, err := handleRip7560Transactions( - transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg, skipInvalid, + transactions, index, statedb, coinbase, header, gp, chainConfig, bc, cfg, skipInvalid, usedGas, ) if err != nil { return nil, nil, nil, nil, err @@ -142,6 +143,7 @@ func handleRip7560Transactions( bc ChainContext, cfg vm.Config, skipInvalid bool, + usedGas *uint64, ) ([]*types.Transaction, types.Receipts, []*types.Rip7560TransactionDebugInfo, []*types.Log, error) { validationPhaseResults := make([]*ValidationPhaseResult, 0) validatedTransactions := make([]*types.Transaction, 0) @@ -187,7 +189,7 @@ func handleRip7560Transactions( // TODO: this will miss all validation phase events - pass in 'vpr' // statedb.SetTxContext(vpr.Tx.Hash(), i) - receipt, err := ApplyRip7560ExecutionPhase(chainConfig, vpr, bc, coinbase, gp, statedb, header, cfg) + receipt, err := ApplyRip7560ExecutionPhase(chainConfig, vpr, bc, coinbase, gp, statedb, header, cfg, usedGas) if err != nil { return nil, nil, nil, nil, err @@ -316,6 +318,10 @@ func ApplyRip7560ValidationPhases( GasPrice: gasPrice, } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) + rules := evm.ChainConfig().Rules(evm.Context.BlockNumber, evm.Context.Random != nil, evm.Context.Time) + + statedb.Prepare(rules, *sender, evm.Context.Coinbase, &AA_ENTRY_POINT, 
vm.ActivePrecompiles(rules), tx.AccessList()) + epc := &EntryPointCall{} if evm.Config.Tracer == nil { @@ -480,7 +486,17 @@ func applyPaymasterPostOpFrame(st *StateTransition, aatx *types.Rip7560AccountAb return paymasterPostOpResult } -func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhaseResult, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, cfg vm.Config) (*types.Receipt, error) { +func ApplyRip7560ExecutionPhase( + config *params.ChainConfig, + vpr *ValidationPhaseResult, + bc ChainContext, + author *common.Address, + gp *GasPool, + statedb *state.StateDB, + header *types.Header, + cfg vm.Config, + usedGas *uint64, +) (*types.Receipt, error) { blockContext := NewEVMBlockContext(header, bc, author) aatx := vpr.Tx.Rip7560TransactionData() @@ -555,7 +571,10 @@ func ApplyRip7560ExecutionPhase(config *params.ChainConfig, vpr *ValidationPhase } } - receipt := &types.Receipt{Type: vpr.Tx.Type(), TxHash: vpr.Tx.Hash(), GasUsed: gasUsed, CumulativeGasUsed: gasUsed} + // TODO: naming convention hell!!! 
'usedGas' is 'CumulativeGasUsed' in block processing + *usedGas += gasUsed + + receipt := &types.Receipt{Type: vpr.Tx.Type(), TxHash: vpr.Tx.Hash(), GasUsed: gasUsed, CumulativeGasUsed: *usedGas} receipt.Status = receiptStatus diff --git a/miner/worker.go b/miner/worker.go index e299d5ef3741..caf415efccd4 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -381,7 +381,7 @@ func (miner *Miner) commitRip7560TransactionsBundle(env *environment, txs *types env.gasPool = new(core.GasPool).AddGas(gasLimit) } - validatedTxs, receipts, validationFailureInfos, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}, true) + validatedTxs, receipts, validationFailureInfos, _, err := core.HandleRip7560Transactions(txs.Transactions, 0, env.state, &env.coinbase, env.header, env.gasPool, miner.chainConfig, miner.chain, vm.Config{}, true, &env.header.GasUsed) miner.chain.SetRip7560TransactionDebugInfo(validationFailureInfos) if err != nil { return err From f05b85d38ef68eef814086481e0d041535e6e105 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 16 Sep 2024 15:34:08 +0200 Subject: [PATCH 64/73] AA-445: Charge validation gas for calldata and repurpose 'CallDataUsedGas' as 'PreTransactionGasCost' (#39) --- core/state_processor_rip7560.go | 80 +++++++++++++++++++-------------- core/state_transition.go | 5 +-- core/types/tx_rip7560.go | 34 +++++++++++++- 3 files changed, 80 insertions(+), 39 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 730ba6bbedb3..f964c2024e24 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -25,21 +25,21 @@ type EntryPointCall struct { } type ValidationPhaseResult struct { - TxIndex int - Tx *types.Transaction - TxHash common.Hash - PaymasterContext []byte - PreCharge *uint256.Int - EffectiveGasPrice *uint256.Int - CallDataUsedGas uint64 - NonceManagerUsedGas uint64 - 
DeploymentUsedGas uint64 - ValidationUsedGas uint64 - PmValidationUsedGas uint64 - SenderValidAfter uint64 - SenderValidUntil uint64 - PmValidAfter uint64 - PmValidUntil uint64 + TxIndex int + Tx *types.Transaction + TxHash common.Hash + PaymasterContext []byte + PreCharge *uint256.Int + EffectiveGasPrice *uint256.Int + PreTransactionGasCost uint64 + NonceManagerUsedGas uint64 + DeploymentUsedGas uint64 + ValidationUsedGas uint64 + PmValidationUsedGas uint64 + SenderValidAfter uint64 + SenderValidUntil uint64 + PmValidAfter uint64 + PmValidUntil uint64 } const ( @@ -344,6 +344,19 @@ func ApplyRip7560ValidationPhases( st.initialGas = gasLimit st.gasRemaining = gasLimit + preTransactionGasCost, err := aatx.PreTransactionGasCost() + if preTransactionGasCost > aatx.ValidationGasLimit { + return nil, newValidationPhaseError( + fmt.Errorf( + "insufficient ValidationGasLimit(%d) to cover PreTransactionGasCost(%d)", + aatx.ValidationGasLimit, preTransactionGasCost, + ), + nil, + nil, + false, + ) + } + /*** Nonce Manager Frame ***/ nonceManagerUsedGas, err := CheckNonceRip7560(st, aatx) if err != nil { @@ -356,7 +369,8 @@ func ApplyRip7560ValidationPhases( if statedb.GetCodeSize(*sender) != 0 { return nil, fmt.Errorf("account deployment failed: already deployed") } - resultDeployer := CallFrame(st, &AA_SENDER_CREATOR, aatx.Deployer, aatx.DeployerData, aatx.ValidationGasLimit) + deployerGasLimit := aatx.ValidationGasLimit - preTransactionGasCost + resultDeployer := CallFrame(st, &AA_SENDER_CREATOR, aatx.Deployer, aatx.DeployerData, deployerGasLimit) if resultDeployer.Failed() { return nil, newValidationPhaseError( resultDeployer.Err, @@ -392,7 +406,8 @@ func ApplyRip7560ValidationPhases( if err != nil { return nil, newValidationPhaseError(err, nil, nil, false) } - resultAccountValidation := CallFrame(st, &AA_ENTRY_POINT, aatx.Sender, accountValidationMsg, aatx.ValidationGasLimit-deploymentUsedGas) + accountGasLimit := aatx.ValidationGasLimit - preTransactionGasCost - 
deploymentUsedGas + resultAccountValidation := CallFrame(st, &AA_ENTRY_POINT, aatx.Sender, accountValidationMsg, accountGasLimit) if resultAccountValidation.Failed() { return nil, newValidationPhaseError( resultAccountValidation.Err, @@ -421,25 +436,24 @@ func ApplyRip7560ValidationPhases( return nil, err } - callDataUsedGas, err := aatx.CallDataGasCost() if err != nil { return nil, err } vpr := &ValidationPhaseResult{ - Tx: tx, - TxHash: tx.Hash(), - PreCharge: preCharge, - EffectiveGasPrice: gasPriceUint256, - PaymasterContext: paymasterContext, - CallDataUsedGas: callDataUsedGas, - DeploymentUsedGas: deploymentUsedGas, - NonceManagerUsedGas: nonceManagerUsedGas, - ValidationUsedGas: resultAccountValidation.UsedGas, - PmValidationUsedGas: pmValidationUsedGas, - SenderValidAfter: aad.ValidAfter.Uint64(), - SenderValidUntil: aad.ValidUntil.Uint64(), - PmValidAfter: pmValidAfter, - PmValidUntil: pmValidUntil, + Tx: tx, + TxHash: tx.Hash(), + PreCharge: preCharge, + EffectiveGasPrice: gasPriceUint256, + PaymasterContext: paymasterContext, + PreTransactionGasCost: preTransactionGasCost, + DeploymentUsedGas: deploymentUsedGas, + NonceManagerUsedGas: nonceManagerUsedGas, + ValidationUsedGas: resultAccountValidation.UsedGas, + PmValidationUsedGas: pmValidationUsedGas, + SenderValidAfter: aad.ValidAfter.Uint64(), + SenderValidUntil: aad.ValidUntil.Uint64(), + PmValidAfter: pmValidAfter, + PmValidUntil: pmValidUntil, } statedb.Finalise(true) @@ -526,7 +540,7 @@ func ApplyRip7560ExecutionPhase( vpr.NonceManagerUsedGas + vpr.DeploymentUsedGas + vpr.PmValidationUsedGas + - vpr.CallDataUsedGas + + vpr.PreTransactionGasCost + executionResult.UsedGas + executionGasPenalty diff --git a/core/state_transition.go b/core/state_transition.go index 3bb91c2f77b5..1a6a66a2fc14 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -68,10 +68,7 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given 
data. -func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isEIP3860 bool, isRIP7560InnerFrame ...bool) (uint64, error) { - if isRIP7560InnerFrame != nil && len(isRIP7560InnerFrame) > 0 && isRIP7560InnerFrame[0] { - return 0, nil - } +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 if isContractCreation && isHomestead { diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index 11a7b35471d9..e0cf564c7b99 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -140,8 +140,38 @@ func callDataCost(data []byte) uint64 { return nz*params.TxDataNonZeroGasEIP2028 + z*params.TxDataZeroGas } -func (tx *Rip7560AccountAbstractionTx) CallDataGasCost() (uint64, error) { - return sumGas(callDataCost(tx.DeployerData), callDataCost(tx.ExecutionData), callDataCost(tx.PaymasterData)) +func (tx *Rip7560AccountAbstractionTx) PreTransactionGasCost() (uint64, error) { + calldataGasCost, err := tx.callDataGasCost() + if err != nil { + return 0, err + } + accessListGasCost := tx.accessListGasCost() + eip7702CodeInsertionsGasCost := tx.eip7702CodeInsertionsGasCost() + return params.Rip7560TxGas + calldataGasCost + accessListGasCost + eip7702CodeInsertionsGasCost, nil +} + +func (tx *Rip7560AccountAbstractionTx) callDataGasCost() (uint64, error) { + return sumGas( + callDataCost(tx.AuthorizationData), + callDataCost(tx.DeployerData), + callDataCost(tx.ExecutionData), + callDataCost(tx.PaymasterData), + ) +} + +// note: copied from state_transition.go 'IntrinsicGas' function +func (tx *Rip7560AccountAbstractionTx) accessListGasCost() uint64 { + if tx.AccessList == nil { + return 0 + } + gas := uint64(len(tx.AccessList)) * params.TxAccessListAddressGas + gas += uint64(tx.AccessList.StorageKeys()) * params.TxAccessListStorageKeyGas + return gas +} + +// note: this 
function must be implemented if EIP-7702 transactions are enabled +func (tx *Rip7560AccountAbstractionTx) eip7702CodeInsertionsGasCost() uint64 { + return 0 } func (tx *Rip7560AccountAbstractionTx) TotalGasLimit() (uint64, error) { From 5b754c259042daab3d7b5f252b1d61bf32f7a6fb Mon Sep 17 00:00:00 2001 From: Dror Tirosh Date: Mon, 16 Sep 2024 19:25:59 +0300 Subject: [PATCH 65/73] AA-408 deduct gas for aatx (#28) * AA-408 deduct gas, pay coinbase * Fix 'CumulativeGasUsed' not being calculated correctly * call Prepare to initialize warm addresses * remove prints * reformat * PR comments --------- Co-authored-by: Alex Forshtat --- core/rip7560_abi.go | 2 +- core/state_processor_rip7560.go | 81 ++++++++++++++++++++++++--------- core/types/tx_rip7560.go | 4 ++ 3 files changed, 64 insertions(+), 23 deletions(-) diff --git a/core/rip7560_abi.go b/core/rip7560_abi.go index 4d0e8b1f4773..f8f243b25328 100644 --- a/core/rip7560_abi.go +++ b/core/rip7560_abi.go @@ -10,7 +10,7 @@ import ( "strings" ) -var Rip7560Abi, err = abi.JSON(strings.NewReader(Rip7560AbiJson)) +var Rip7560Abi, _ = abi.JSON(strings.NewReader(Rip7560AbiJson)) type AcceptAccountData struct { ValidAfter *big.Int diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index f964c2024e24..1e1fe01a0d0a 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -32,6 +32,8 @@ type ValidationPhaseResult struct { PreCharge *uint256.Int EffectiveGasPrice *uint256.Int PreTransactionGasCost uint64 + ValidationRefund uint64 + CallDataUsedGas uint64 NonceManagerUsedGas uint64 DeploymentUsedGas uint64 ValidationUsedGas uint64 @@ -211,11 +213,10 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St //TODO: check gasLimit against block gasPool preCharge := new(uint256.Int).SetUint64(gasLimit) preCharge = preCharge.Mul(preCharge, gasPrice) - balanceCheck := new(uint256.Int).Set(preCharge) chargeFrom := st.GasPayer() - if have, want := 
state.GetBalance(*chargeFrom), balanceCheck; have.Cmp(want) < 0 { + if have, want := state.GetBalance(*chargeFrom), preCharge; have.Cmp(want) < 0 { return 0, nil, fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } @@ -291,7 +292,7 @@ func ptr(s string) *string { return &s } func ApplyRip7560ValidationPhases( chainConfig *params.ChainConfig, bc ChainContext, - author *common.Address, + coinbase *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, @@ -300,21 +301,17 @@ func ApplyRip7560ValidationPhases( ) (*ValidationPhaseResult, error) { aatx := tx.Rip7560TransactionData() - gasPrice := new(big.Int).Add(header.BaseFee, tx.GasTipCap()) - if gasPrice.Cmp(tx.GasFeeCap()) > 0 { - gasPrice = tx.GasFeeCap() - } - gasPriceUint256, _ := uint256.FromBig(gasPrice) - - gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) + gasPrice := aatx.EffectiveGasPrice(header.BaseFee) + effectiveGasPrice := uint256.MustFromBig(gasPrice) + gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, effectiveGasPrice) if err != nil { return nil, newValidationPhaseError(err, nil, nil, false) } - blockContext := NewEVMBlockContext(header, bc, author) - sender := tx.Rip7560TransactionData().Sender + blockContext := NewEVMBlockContext(header, bc, coinbase) + sender := aatx.Sender txContext := vm.TxContext{ - Origin: *sender, + Origin: *aatx.Sender, GasPrice: gasPrice, } evm := vm.NewEVM(blockContext, txContext, statedb, chainConfig, cfg) @@ -436,16 +433,16 @@ func ApplyRip7560ValidationPhases( return nil, err } - if err != nil { - return nil, err - } + gasRefund := st.state.GetRefund() + vpr := &ValidationPhaseResult{ Tx: tx, TxHash: tx.Hash(), PreCharge: preCharge, - EffectiveGasPrice: gasPriceUint256, + EffectiveGasPrice: effectiveGasPrice, PaymasterContext: paymasterContext, PreTransactionGasCost: preTransactionGasCost, + ValidationRefund: gasRefund, DeploymentUsedGas: 
deploymentUsedGas, NonceManagerUsedGas: nonceManagerUsedGas, ValidationUsedGas: resultAccountValidation.UsedGas, @@ -500,6 +497,14 @@ func applyPaymasterPostOpFrame(st *StateTransition, aatx *types.Rip7560AccountAb return paymasterPostOpResult } +func capRefund(getRefund uint64, gasUsed uint64) uint64 { + refund := gasUsed / params.RefundQuotientEIP3529 + if refund > getRefund { + return getRefund + } + return refund +} + func ApplyRip7560ExecutionPhase( config *params.ChainConfig, vpr *ValidationPhaseResult, @@ -530,6 +535,7 @@ func ApplyRip7560ExecutionPhase( executionResult := CallFrame(st, &AA_ENTRY_POINT, sender, accountExecutionMsg, aatx.Gas) receiptStatus := types.ReceiptStatusSuccessful executionStatus := ExecutionStatusSuccess + execRefund := capRefund(st.state.GetRefund(), executionResult.UsedGas) if executionResult.Failed() { receiptStatus = types.ReceiptStatusFailed executionStatus = ExecutionStatusExecutionFailure @@ -544,11 +550,14 @@ func ApplyRip7560ExecutionPhase( executionResult.UsedGas + executionGasPenalty + gasRefund := capRefund(execRefund+vpr.ValidationRefund, gasUsed) + var postOpGasUsed uint64 var paymasterPostOpResult *ExecutionResult if len(vpr.PaymasterContext) != 0 { - paymasterPostOpResult = applyPaymasterPostOpFrame(st, aatx, vpr, !executionResult.Failed(), gasUsed) + paymasterPostOpResult = applyPaymasterPostOpFrame(st, aatx, vpr, !executionResult.Failed(), gasUsed-gasRefund) postOpGasUsed = paymasterPostOpResult.UsedGas + gasRefund += capRefund(paymasterPostOpResult.RefundedGas, postOpGasUsed) // PostOp failed, reverting execution changes if paymasterPostOpResult.Failed() { statedb.RevertToSnapshot(beforeExecSnapshotId) @@ -559,10 +568,14 @@ func ApplyRip7560ExecutionPhase( executionStatus = ExecutionStatusPostOpFailure } postOpGasPenalty := (aatx.PostOpGas - postOpGasUsed) * AA_GAS_PENALTY_PCT / 100 - gasUsed += postOpGasUsed + postOpGasPenalty + postOpGasUsed += postOpGasPenalty + gasUsed += postOpGasUsed } + gasUsed -= gasRefund 
+ refundPayer(vpr, statedb, gasUsed) + payCoinbase(st, aatx, gasUsed) - err = injectRIP7560TransactionEvent(aatx, executionStatus, header, statedb) + err := injectRIP7560TransactionEvent(aatx, executionStatus, header, statedb) if err != nil { return nil, err } @@ -592,8 +605,6 @@ func ApplyRip7560ExecutionPhase( receipt.Status = receiptStatus - refundPayer(vpr, statedb, gasUsed) - // Set the receipt logs and create the bloom filter. blockNumber := header.Number receipt.Logs = statedb.GetLogs(vpr.TxHash, blockNumber.Uint64(), common.Hash{}) @@ -683,6 +694,32 @@ func injectEvent(topics []common.Hash, data []byte, blockNumber uint64, statedb return nil } +// extracted from TransitionDb() +func payCoinbase(st *StateTransition, msg *types.Rip7560AccountAbstractionTx, gasUsed uint64) { + rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil, st.evm.Context.Time) + + effectiveTip := msg.GasTipCap + if rules.IsLondon { + effectiveTip = math.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee)) + } + + effectiveTipU256, _ := uint256.FromBig(effectiveTip) + + if st.evm.Config.NoBaseFee && msg.GasFeeCap.Sign() == 0 && msg.GasTipCap.Sign() == 0 { + // Skip fee payment when NoBaseFee is set and the fee fields + // are 0. This avoids a negative effectiveTip being applied to + // the coinbase when simulating calls. 
+ } else { + fee := new(uint256.Int).SetUint64(gasUsed) + fee.Mul(fee, effectiveTipU256) + st.state.AddBalance(st.evm.Context.Coinbase, fee, tracing.BalanceIncreaseRewardTransactionFee) + // add the coinbase to the witness iff the fee is greater than 0 + if rules.IsEIP4762 && fee.Sign() != 0 { + st.evm.AccessEvents.BalanceGas(st.evm.Context.Coinbase, true) + } + } +} + func prepareAccountValidationMessage(tx *types.Rip7560AccountAbstractionTx, signingHash common.Hash) ([]byte, error) { return abiEncodeValidateTransaction(tx, signingHash) } diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index e0cf564c7b99..b0b6388b52f8 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -186,6 +186,10 @@ func (tx *Rip7560AccountAbstractionTx) IsRip7712Nonce() bool { return tx.NonceKey != nil && tx.NonceKey.Cmp(big.NewInt(0)) == 1 } +func (tx *Rip7560AccountAbstractionTx) EffectiveGasPrice(baseFee *big.Int) *big.Int { + return tx.effectiveGasPrice(new(big.Int), baseFee) +} + func (tx *Rip7560AccountAbstractionTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil { return dst.Set(tx.GasFeeCap) From e46cb6204865a4017e27f0ca6ccdbfb1c8c2fb5c Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 16 Sep 2024 18:49:04 +0200 Subject: [PATCH 66/73] AA-414: Consume block gas limit for RIP-7560 transactions --- core/state_processor_rip7560.go | 21 +++++++++++++++++++-- core/types/tx_rip7560.go | 6 +++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index f964c2024e24..78be6ed567e9 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -202,7 +202,12 @@ func handleRip7560Transactions( return validatedTransactions, receipts, validationFailureInfos, allLogs, nil } -func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.StateDB, gasPrice *uint256.Int) (uint64, *uint256.Int, error) { +func 
BuyGasRip7560Transaction( + st *types.Rip7560AccountAbstractionTx, + state vm.StateDB, + gasPrice *uint256.Int, + gp *GasPool, +) (uint64, *uint256.Int, error) { gasLimit, err := st.TotalGasLimit() if err != nil { return 0, nil, err @@ -220,6 +225,9 @@ func BuyGasRip7560Transaction(st *types.Rip7560AccountAbstractionTx, state vm.St } state.SubBalance(*chargeFrom, preCharge, 0) + if err := gp.SubGas(gasLimit); err != nil { + return 0, nil, err + } return gasLimit, preCharge, nil } @@ -306,7 +314,7 @@ func ApplyRip7560ValidationPhases( } gasPriceUint256, _ := uint256.FromBig(gasPrice) - gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256) + gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, gasPriceUint256, gp) if err != nil { return nil, newValidationPhaseError(err, nil, nil, false) } @@ -594,6 +602,15 @@ func ApplyRip7560ExecutionPhase( refundPayer(vpr, statedb, gasUsed) + // Also return remaining gas to the block gas counter so it is + // available for the next transaction. + totalGasLimit, _ := aatx.TotalGasLimit() + if totalGasLimit < gasUsed { + panic("cannot spend more gas than the total limit") + } + gasRemaining := totalGasLimit - gasUsed + gp.AddGas(gasRemaining) + // Set the receipt logs and create the bloom filter. 
blockNumber := header.Number receipt.Logs = statedb.GetLogs(vpr.TxHash, blockNumber.Uint64(), common.Hash{}) diff --git a/core/types/tx_rip7560.go b/core/types/tx_rip7560.go index e0cf564c7b99..040011ae58e0 100644 --- a/core/types/tx_rip7560.go +++ b/core/types/tx_rip7560.go @@ -118,7 +118,7 @@ func (tx *Rip7560AccountAbstractionTx) GasPayer() *common.Address { return tx.Sender } -func sumGas(vals ...uint64) (uint64, error) { +func SumGas(vals ...uint64) (uint64, error) { var sum uint64 for _, val := range vals { if val > 1<<62 { @@ -151,7 +151,7 @@ func (tx *Rip7560AccountAbstractionTx) PreTransactionGasCost() (uint64, error) { } func (tx *Rip7560AccountAbstractionTx) callDataGasCost() (uint64, error) { - return sumGas( + return SumGas( callDataCost(tx.AuthorizationData), callDataCost(tx.DeployerData), callDataCost(tx.ExecutionData), @@ -175,7 +175,7 @@ func (tx *Rip7560AccountAbstractionTx) eip7702CodeInsertionsGasCost() uint64 { } func (tx *Rip7560AccountAbstractionTx) TotalGasLimit() (uint64, error) { - return sumGas( + return SumGas( params.Rip7560TxGas, tx.Gas, tx.ValidationGasLimit, tx.PaymasterValidationGasLimit, tx.PostOpGas, ) From 90a379c182fce9af0caaec957992f8c6fab9dfca Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 16 Sep 2024 18:56:19 +0200 Subject: [PATCH 67/73] Remove unused function; extract 'validationPhaseUsedGas' --- core/state/statedb.go | 8 -------- core/state_processor_rip7560.go | 19 +++++++++++++------ 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index f69ddc301a2f..61e76cdd7788 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -723,14 +723,6 @@ func (s *StateDB) Copy() *StateDB { return state } -func (s *StateDB) AccessListCopy() *accessList { - return s.accessList.Copy() -} - -func (s *StateDB) SetAccessList(al *accessList) { - s.accessList = al -} - // Snapshot returns an identifier for the current revision of the state. 
func (s *StateDB) Snapshot() int { id := s.nextRevisionId diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 890cb331f3e0..dae269768b64 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -44,6 +44,16 @@ type ValidationPhaseResult struct { PmValidUntil uint64 } +func (vpr *ValidationPhaseResult) validationPhaseUsedGas() (uint64, error) { + return types.SumGas( + vpr.PreTransactionGasCost, + vpr.NonceManagerUsedGas, + vpr.DeploymentUsedGas, + vpr.ValidationUsedGas, + vpr.PmValidationUsedGas, + ) +} + const ( ExecutionStatusSuccess = uint64(0) ExecutionStatusExecutionFailure = uint64(1) @@ -550,11 +560,8 @@ func ApplyRip7560ExecutionPhase( } executionGasPenalty := (aatx.Gas - executionResult.UsedGas) * AA_GAS_PENALTY_PCT / 100 - gasUsed := vpr.ValidationUsedGas + - vpr.NonceManagerUsedGas + - vpr.DeploymentUsedGas + - vpr.PmValidationUsedGas + - vpr.PreTransactionGasCost + + validationPhaseUsedGas, _ := vpr.validationPhaseUsedGas() + gasUsed := validationPhaseUsedGas + executionResult.UsedGas + executionGasPenalty @@ -583,7 +590,7 @@ func ApplyRip7560ExecutionPhase( refundPayer(vpr, statedb, gasUsed) payCoinbase(st, aatx, gasUsed) - // Also return remaining gas to the block gas counter so it is + // Also return remaining gas to the block gas counter so it is // available for the next transaction. 
totalGasLimit, _ := aatx.TotalGasLimit() if totalGasLimit < gasUsed { From 87572bda15ca0a12fde30868082a286aedb6af00 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 16 Sep 2024 21:22:48 +0200 Subject: [PATCH 68/73] Expose gas limit reached error through the debug API --- core/state_processor_rip7560.go | 15 ++++++++++++--- eth/tracers/api_tracing_rip7560.go | 3 ++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index dae269768b64..03e15abbc537 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -82,6 +82,11 @@ func newValidationPhaseError( revertEntityName *string, frameReverted bool, ) *ValidationPhaseError { + // TODO: I have my doubts about this way of handling errors in Go. Is this a reasonable thing to do? + var vpeCast *ValidationPhaseError + if errors.As(innerErr, &vpeCast) { + return vpeCast + } var errorMessage string contractSubst := "" if revertEntityName != nil { @@ -184,7 +189,10 @@ func handleRip7560Transactions( if errors.As(vpe, &vpeCast) { debugInfo.RevertData = vpeCast.reason debugInfo.FrameReverted = vpeCast.frameReverted - debugInfo.RevertEntityName = *vpeCast.revertEntityName + debugInfo.RevertEntityName = "" + if vpeCast.revertEntityName != nil { + debugInfo.RevertEntityName = *vpeCast.revertEntityName + } } statedb.RevertToSnapshot(beforeValidationSnapshotId) continue @@ -232,12 +240,13 @@ func BuyGasRip7560Transaction( chargeFrom := st.GasPayer() if have, want := state.GetBalance(*chargeFrom), preCharge; have.Cmp(want) < 0 { - return 0, nil, fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) + return 0, nil, fmt.Errorf("%w: RIP-7560 address %v have %v want %v", ErrInsufficientFunds, chargeFrom.Hex(), have, want) } state.SubBalance(*chargeFrom, preCharge, 0) + println("BuyGasRip7560Transaction GP:", gp.String(), gasLimit) if err := gp.SubGas(gasLimit); err != nil { - return 0, 
nil, err + return 0, nil, newValidationPhaseError(err, nil, ptr("block gas limit"), false) } return gasLimit, preCharge, nil } diff --git a/eth/tracers/api_tracing_rip7560.go b/eth/tracers/api_tracing_rip7560.go index 03a5667c5e1d..3b7ac75c64ec 100644 --- a/eth/tracers/api_tracing_rip7560.go +++ b/eth/tracers/api_tracing_rip7560.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -137,7 +138,7 @@ func (api *Rip7560API) traceTx( // Call Prepare to clear out the statedb access list statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) - gp := new(core.GasPool).AddGas(10000000) + gp := new(core.GasPool).AddGas(math.MaxUint64) // TODO: this is added to allow our bundler checking the 'TraceValidation' API is supported on Geth if tx.Rip7560TransactionData().Sender.Cmp(common.HexToAddress("0x0000000000000000000000000000000000000000")) == 0 { From 1666139b752c4800c8116e261f6326e0146e7709 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 25 Sep 2024 19:34:39 +0200 Subject: [PATCH 69/73] AA-405: Perform multiple static validations on an RIP-7560 transaction (#42) --- core/state_processor_rip7560.go | 153 +++++++++++++++++++++++++------- 1 file changed, 123 insertions(+), 30 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 1e1fe01a0d0a..5c72dd5e2901 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -65,6 +65,14 @@ func (v *ValidationPhaseError) ErrorData() interface{} { return v.reason } +// wrapError creates a revertError instance for validation errors not caused by an on-chain revert +func wrapError( + innerErr error, +) *ValidationPhaseError { + return newValidationPhaseError(innerErr, nil, nil, false) + +} + // newValidationPhaseError creates a revertError instance with the 
provided revert data. func newValidationPhaseError( innerErr error, @@ -258,7 +266,7 @@ func CheckNonceRip7560(st *StateTransition, tx *types.Rip7560AccountAbstractionT func performNonceCheckFrameRip7712(st *StateTransition, tx *types.Rip7560AccountAbstractionTx) (uint64, error) { if !st.evm.ChainConfig().IsRIP7712(st.evm.Context.BlockNumber) { - return 0, newValidationPhaseError(fmt.Errorf("RIP-7712 nonce is disabled"), nil, nil, false) + return 0, wrapError(fmt.Errorf("RIP-7712 nonce is disabled")) } nonceManagerMessageData := prepareNonceManagerMessage(tx) resultNonceManager := CallFrame(st, &AA_ENTRY_POINT, &AA_NONCE_MANAGER, nonceManagerMessageData, st.gasRemaining) @@ -300,12 +308,16 @@ func ApplyRip7560ValidationPhases( cfg vm.Config, ) (*ValidationPhaseResult, error) { aatx := tx.Rip7560TransactionData() + err := performStaticValidation(aatx, statedb) + if err != nil { + return nil, wrapError(err) + } gasPrice := aatx.EffectiveGasPrice(header.BaseFee) effectiveGasPrice := uint256.MustFromBig(gasPrice) gasLimit, preCharge, err := BuyGasRip7560Transaction(aatx, statedb, effectiveGasPrice) if err != nil { - return nil, newValidationPhaseError(err, nil, nil, false) + return nil, wrapError(err) } blockContext := NewEVMBlockContext(header, bc, coinbase) @@ -342,16 +354,8 @@ func ApplyRip7560ValidationPhases( st.gasRemaining = gasLimit preTransactionGasCost, err := aatx.PreTransactionGasCost() - if preTransactionGasCost > aatx.ValidationGasLimit { - return nil, newValidationPhaseError( - fmt.Errorf( - "insufficient ValidationGasLimit(%d) to cover PreTransactionGasCost(%d)", - aatx.ValidationGasLimit, preTransactionGasCost, - ), - nil, - nil, - false, - ) + if err != nil { + return nil, err } /*** Nonce Manager Frame ***/ @@ -363,9 +367,6 @@ func ApplyRip7560ValidationPhases( /*** Deployer Frame ***/ var deploymentUsedGas uint64 if aatx.Deployer != nil { - if statedb.GetCodeSize(*sender) != 0 { - return nil, fmt.Errorf("account deployment failed: already deployed") 
- } deployerGasLimit := aatx.ValidationGasLimit - preTransactionGasCost resultDeployer := CallFrame(st, &AA_SENDER_CREATOR, aatx.Deployer, aatx.DeployerData, deployerGasLimit) if resultDeployer.Failed() { @@ -377,20 +378,14 @@ func ApplyRip7560ValidationPhases( ) } if statedb.GetCodeSize(*sender) == 0 { - return nil, newValidationPhaseError( + return nil, wrapError( fmt.Errorf( - "sender not deployed by factory, sender:%s factory:%s", + "sender not deployed by the deployer, sender:%s deployer:%s", sender.String(), aatx.Deployer.String(), - ), nil, nil, false) + )) } deploymentUsedGas = resultDeployer.UsedGas } else { - if statedb.GetCodeSize(*sender) == 0 { - return nil, newValidationPhaseError( - fmt.Errorf( - "account is not deployed and no factory is specified, account:%s", sender.String(), - ), nil, nil, false) - } if !aatx.IsRip7712Nonce() { statedb.SetNonce(*sender, statedb.GetNonce(*sender)+1) } @@ -401,7 +396,7 @@ func ApplyRip7560ValidationPhases( signingHash := signer.Hash(tx) accountValidationMsg, err := prepareAccountValidationMessage(aatx, signingHash) if err != nil { - return nil, newValidationPhaseError(err, nil, nil, false) + return nil, wrapError(err) } accountGasLimit := aatx.ValidationGasLimit - preTransactionGasCost - deploymentUsedGas resultAccountValidation := CallFrame(st, &AA_ENTRY_POINT, aatx.Sender, accountValidationMsg, accountGasLimit) @@ -415,7 +410,7 @@ func ApplyRip7560ValidationPhases( } aad, err := validateAccountEntryPointCall(epc, aatx.Sender) if err != nil { - return nil, newValidationPhaseError(err, nil, nil, false) + return nil, wrapError(err) } // clear the EntryPoint calls array after parsing @@ -425,7 +420,7 @@ func ApplyRip7560ValidationPhases( err = validateValidityTimeRange(header.Time, aad.ValidAfter.Uint64(), aad.ValidUntil.Uint64()) if err != nil { - return nil, newValidationPhaseError(err, nil, nil, false) + return nil, wrapError(err) } paymasterContext, pmValidationUsedGas, pmValidAfter, pmValidUntil, err := 
applyPaymasterValidationFrame(st, epc, tx, signingHash, header) @@ -457,13 +452,103 @@ func ApplyRip7560ValidationPhases( return vpr, nil } +func performStaticValidation( + aatx *types.Rip7560AccountAbstractionTx, + statedb *state.StateDB, +) error { + hasPaymaster := aatx.Paymaster != nil + hasPaymasterData := aatx.PaymasterData != nil && len(aatx.PaymasterData) != 0 + hasPaymasterGasLimit := aatx.PaymasterValidationGasLimit != 0 + hasDeployer := aatx.Deployer != nil + hasDeployerData := aatx.DeployerData != nil && len(aatx.DeployerData) != 0 + hasCodeSender := statedb.GetCodeSize(*aatx.Sender) != 0 + + if !hasDeployer && hasDeployerData { + return wrapError( + fmt.Errorf( + "deployer data of size %d is provided but deployer address is not set", + len(aatx.DeployerData), + ), + ) + } + if !hasPaymaster && (hasPaymasterData || hasPaymasterGasLimit) { + return wrapError( + fmt.Errorf( + "paymaster data of size %d (or a gas limit: %d) is provided but paymaster address is not set", + len(aatx.DeployerData), + aatx.PaymasterValidationGasLimit, + ), + ) + } + + if hasPaymaster { + if !hasPaymasterGasLimit { + return wrapError( + fmt.Errorf( + "paymaster address %s is provided but 'paymasterVerificationGasLimit' is zero", + aatx.Paymaster.String(), + ), + ) + } + hasCodePaymaster := statedb.GetCodeSize(*aatx.Paymaster) != 0 + if !hasCodePaymaster { + return wrapError( + fmt.Errorf( + "paymaster address %s is provided but contract has no code deployed", + aatx.Paymaster.String(), + ), + ) + } + } + + if hasDeployer { + hasCodeDeployer := statedb.GetCodeSize(*aatx.Deployer) != 0 + if !hasCodeDeployer { + return wrapError( + fmt.Errorf( + "deployer address %s is provided but contract has no code deployed", + aatx.Deployer.String(), + ), + ) + } + if hasCodeSender { + return wrapError( + fmt.Errorf( + "sender address %s and deployer address %s are provided but sender is already deployed", + aatx.Sender.String(), + aatx.Deployer.String(), + )) + } + } + + 
preTransactionGasCost, _ := aatx.PreTransactionGasCost() + if preTransactionGasCost > aatx.ValidationGasLimit { + return wrapError( + fmt.Errorf( + "insufficient ValidationGasLimit(%d) to cover PreTransactionGasCost(%d)", + aatx.ValidationGasLimit, preTransactionGasCost, + ), + ) + } + + if !hasDeployer && !hasCodeSender { + return wrapError( + fmt.Errorf( + "account is not deployed and no deployer is specified, account:%s", aatx.Sender.String(), + ), + ) + } + + return nil +} + func applyPaymasterValidationFrame(st *StateTransition, epc *EntryPointCall, tx *types.Transaction, signingHash common.Hash, header *types.Header) ([]byte, uint64, uint64, uint64, error) { /*** Paymaster Validation Frame ***/ aatx := tx.Rip7560TransactionData() var pmValidationUsedGas uint64 paymasterMsg, err := preparePaymasterValidationMessage(aatx, signingHash) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) + return nil, 0, 0, 0, wrapError(err) } if paymasterMsg == nil { return nil, 0, 0, 0, nil @@ -481,11 +566,19 @@ func applyPaymasterValidationFrame(st *StateTransition, epc *EntryPointCall, tx pmValidationUsedGas = resultPm.UsedGas apd, err := validatePaymasterEntryPointCall(epc, aatx.Paymaster) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) + return nil, 0, 0, 0, wrapError(err) } err = validateValidityTimeRange(header.Time, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64()) if err != nil { - return nil, 0, 0, 0, newValidationPhaseError(err, nil, nil, false) + return nil, 0, 0, 0, wrapError(err) + } + if len(apd.Context) > 0 && aatx.PostOpGas == 0 { + return nil, 0, 0, 0, wrapError( + fmt.Errorf( + "paymaster returned a context of size %d but the paymasterPostOpGasLimit is 0", + len(apd.Context), + ), + ) } return apd.Context, pmValidationUsedGas, apd.ValidAfter.Uint64(), apd.ValidUntil.Uint64(), nil } From 26e66e4d970d8f94edf10f937950201cb2e1fbd9 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 25 Sep 
2024 20:10:51 +0200 Subject: [PATCH 70/73] Remove print and comment --- core/state_processor_rip7560.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 03e15abbc537..75e21d4ce567 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -82,7 +82,6 @@ func newValidationPhaseError( revertEntityName *string, frameReverted bool, ) *ValidationPhaseError { - // TODO: I have my doubts about this way of handling errors in Go. Is this a reasonable thing to do? var vpeCast *ValidationPhaseError if errors.As(innerErr, &vpeCast) { return vpeCast @@ -244,7 +243,6 @@ func BuyGasRip7560Transaction( } state.SubBalance(*chargeFrom, preCharge, 0) - println("BuyGasRip7560Transaction GP:", gp.String(), gasLimit) if err := gp.SubGas(gasLimit); err != nil { return 0, nil, newValidationPhaseError(err, nil, ptr("block gas limit"), false) } From c57db1eab9cf18e20e4486dcab7a7c227196d955 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Wed, 25 Sep 2024 20:14:37 +0200 Subject: [PATCH 71/73] Rename --- core/state_processor_rip7560.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 75e21d4ce567..8e937ba527d4 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -404,7 +404,7 @@ func ApplyRip7560ValidationPhases( if statedb.GetCodeSize(*sender) == 0 { return nil, newValidationPhaseError( fmt.Errorf( - "sender not deployed by factory, sender:%s factory:%s", + "sender not deployed by the deployer, sender:%s deployer:%s", sender.String(), aatx.Deployer.String(), ), nil, nil, false) } @@ -413,7 +413,7 @@ func ApplyRip7560ValidationPhases( if statedb.GetCodeSize(*sender) == 0 { return nil, newValidationPhaseError( fmt.Errorf( - "account is not deployed and no factory is specified, account:%s", sender.String(), + "account is not deployed and no deployer is specified, 
account:%s", sender.String(), ), nil, nil, false) } if !aatx.IsRip7712Nonce() { From 87379d3edd1d42b4d920a8e753cc745a59bfc139 Mon Sep 17 00:00:00 2001 From: Alex Forshtat Date: Mon, 21 Oct 2024 16:31:57 +0200 Subject: [PATCH 72/73] AA-446: Separate 'nonce' into two 256-bit values 'nonceKey' and 'nonceSequence' (#40) * AA-446: Separate 'nonce' into two 256-bit values 'nonceKey' and 'nonceSequence' * Fix nonce manager address * Print nonce manager address --------- Co-authored-by: shahafn --- core/rip7712_nonce.go | 6 +++--- core/state_processor_rip7560.go | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/core/rip7712_nonce.go b/core/rip7712_nonce.go index 1cdb9c33fdfa..3ca257eca7c1 100644 --- a/core/rip7712_nonce.go +++ b/core/rip7712_nonce.go @@ -9,13 +9,13 @@ import ( ) // TODO: accept address as configuration parameter -var AA_NONCE_MANAGER = common.HexToAddress("0x63f63e798f5F6A934Acf0a3FD1C01f3Fac851fF0") +var AA_NONCE_MANAGER = common.HexToAddress("0x632FaFb21910D6C8b4A3995063Dd984F2b829C02") func prepareNonceManagerMessage(tx *types.Rip7560AccountAbstractionTx) []byte { return slices.Concat( tx.Sender.Bytes(), - math.PaddedBigBytes(tx.NonceKey, 24), - math.PaddedBigBytes(big.NewInt(int64(tx.Nonce)), 8), + math.PaddedBigBytes(tx.NonceKey, 32), + math.PaddedBigBytes(big.NewInt(int64(tx.Nonce)), 32), ) } diff --git a/core/state_processor_rip7560.go b/core/state_processor_rip7560.go index 7e1d7213a633..fdf9ceba1b6a 100644 --- a/core/state_processor_rip7560.go +++ b/core/state_processor_rip7560.go @@ -293,6 +293,7 @@ func performNonceCheckFrameRip7712(st *StateTransition, tx *types.Rip7560Account if !st.evm.ChainConfig().IsRIP7712(st.evm.Context.BlockNumber) { return 0, wrapError(fmt.Errorf("RIP-7712 nonce is disabled")) } + fmt.Printf("performNonceCheckFrameRip7712: %s", AA_NONCE_MANAGER.String()) nonceManagerMessageData := prepareNonceManagerMessage(tx) resultNonceManager := CallFrame(st, &AA_ENTRY_POINT, &AA_NONCE_MANAGER, 
nonceManagerMessageData, st.gasRemaining) if resultNonceManager.Failed() { From 27459c9b3cd8548005893309b2fb3f238cc89f57 Mon Sep 17 00:00:00 2001 From: shahafn Date: Wed, 30 Oct 2024 13:55:06 +0100 Subject: [PATCH 73/73] Include ERC-7562 tracer Remove checking the 'OnlyTopCall' as it doesn't fit the use-case Remove most of the string-based opcode manipulation Fix Construct the correct tracer object Bring in 'OnTxEnd' and 'GetResult' functions Missing 'OnOpcode' hook native bundlerCollectorTracer Rename to erc7562Tracer Fixing tracer wip WIP: Make 'allowed opcodes' a configurable parameter with a hex string input WIP: Remove all inefficient code working with opcodes as strings erc7562 tracer wip Fixing pointer Fixing contract size type Fixing handleExtOpcodes Fixing stack pointers Changing UsedOpcodes type Fixing ignoredOpcodes Adding isCall(), fixing PR comments Remving callTracer, callTracerConfig, adding erc7562TracerConfig Detecting OOG in OnExit instead of OnOpcode Removing unused function Removing unused Changing Keccak member from array to mapping Replacing lastSeenOpcodes for lastOpWithStack --- core/vm/opcodes.go | 4 +- eth/tracers/native/erc7562.go | 554 ++++++++++++++++++ .../native/gen_callframewithopcodes_json.go | 137 +++++ 3 files changed, 693 insertions(+), 2 deletions(-) create mode 100644 eth/tracers/native/erc7562.go create mode 100644 eth/tracers/native/gen_callframewithopcodes_json.go diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 2b9231fe1af2..722d4e2c6cbd 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -162,7 +162,7 @@ const ( // 0x80 range - dups. const ( - DUP1 = 0x80 + iota + DUP1 OpCode = 0x80 + iota DUP2 DUP3 DUP4 @@ -182,7 +182,7 @@ const ( // 0x90 range - swaps. 
const ( - SWAP1 = 0x90 + iota + SWAP1 OpCode = 0x90 + iota SWAP2 SWAP3 SWAP4 diff --git a/eth/tracers/native/erc7562.go b/eth/tracers/native/erc7562.go new file mode 100644 index 000000000000..276f33aa1329 --- /dev/null +++ b/eth/tracers/native/erc7562.go @@ -0,0 +1,554 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package native + +import ( + "encoding/json" + "errors" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" + "math/big" + "runtime" + "runtime/debug" + "sync/atomic" +) + +//go:generate go run github.com/fjl/gencodec -type callFrameWithOpcodes -field-override callFrameWithOpcodesMarshaling -out gen_callframewithopcodes_json.go + +func init() { + tracers.DefaultDirectory.Register("erc7562Tracer", newErc7562Tracer, false) +} + +type contractSizeWithOpcode struct { + ContractSize int `json:"contractSize"` + Opcode vm.OpCode `json:"opcode"` +} + +type callFrameWithOpcodes struct { + Type vm.OpCode `json:"-"` + From common.Address `json:"from"` + Gas uint64 `json:"gas"` + GasUsed uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input []byte `json:"input" rlp:"optional"` + Output []byte `json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + // Placed at end on purpose. The RLP will be decoded to 0 instead of + // nil if there are non-empty elements after in the struct. 
// processOutput finalizes a call frame once its scope has exited: it stores
// the (copied) return data, records the error and revert status, and, for a
// reverted call, attempts to decode a human-readable revert reason.
func (f *callFrameWithOpcodes) processOutput(output []byte, err error, reverted bool) {
	// Copy so the frame does not alias the EVM's reusable return buffer.
	output = common.CopyBytes(output)
	// Clear error if tx wasn't reverted. This happened
	// for pre-homestead contract storage OOG.
	if err != nil && !reverted {
		err = nil
	}
	if err == nil {
		f.Output = output
		return
	}
	f.Error = err.Error()
	f.revertedSnapshot = reverted
	// A failed CREATE/CREATE2 deploys nothing, so there is no "to" address.
	if f.Type == vm.CREATE || f.Type == vm.CREATE2 {
		f.To = nil
	}
	// Only explicit REVERTs with data can carry a decodable reason.
	if !errors.Is(err, vm.ErrExecutionReverted) || len(output) == 0 {
		return
	}
	f.Output = output
	// A Solidity Error(string) payload is at least a 4-byte selector.
	if len(output) < 4 {
		return
	}
	if unpacked, err := abi.UnpackRevert(output); err == nil {
		f.RevertReason = unpacked
	}
}
+ reason error // Textual reason for the interruption + env *tracing.VMContext + + ignoredOpcodes map[vm.OpCode]struct{} + callstackWithOpcodes []callFrameWithOpcodes + lastOpWithStack *opcodeWithPartialStack + Keccak map[string]struct{} `json:"keccak"` +} + +// catchPanic handles panic recovery and logs the panic and stack trace. +func catchPanic() { + if r := recover(); r != nil { + // Retrieve the function name + pc, _, _, _ := runtime.Caller(1) + funcName := runtime.FuncForPC(pc).Name() + + // Log the panic and function name + log.Error("Panic in", funcName, r) + debug.PrintStack() + } +} + +// newErc7562Tracer returns a native go tracer which tracks +// call frames of a tx, and implements vm.EVMLogger. +func newErc7562Tracer(ctx *tracers.Context, cfg json.RawMessage /*, chainConfig *params.ChainConfig*/) (*tracers.Tracer, error) { + t, err := newErc7562TracerObject(ctx, cfg) + if err != nil { + return nil, err + } + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnOpcode: t.OnOpcode, + OnTxEnd: t.OnTxEnd, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnLog: t.OnLog, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil +} + +type erc7562TracerConfig struct { + StackTopItemsSize int `json:"stackTopItemsSize"` + IgnoredOpcodes map[vm.OpCode]struct{} `json:"ignoredOpcodes"` // Opcodes to ignore during OnOpcode hook execution + WithLog bool `json:"withLog"` // If true, erc7562 tracer will collect event logs +} + +// Function to convert byte array to map[vm.OpCode]struct{} +func ConvertBytesToOpCodes(byteArray []byte) map[vm.OpCode]struct{} { + var opCodes map[vm.OpCode]struct{} + for _, b := range byteArray { + opCodes[vm.OpCode(b)] = struct{}{} + } + return opCodes +} + +func getFullConfiguration(partial erc7562TracerConfig) erc7562TracerConfig { + config := partial + + if config.IgnoredOpcodes == nil { + config.IgnoredOpcodes = defaultIgnoredOpcodes() + } + if config.StackTopItemsSize == 0 { + config.StackTopItemsSize = 3 + } + + 
return config +} + +func newErc7562TracerObject(ctx *tracers.Context, cfg json.RawMessage) (*erc7562Tracer, error) { + var config erc7562TracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, err + } + } + // First callframe contains tx context info + // and is populated on start and end. + return &erc7562Tracer{ + callstackWithOpcodes: make([]callFrameWithOpcodes, 0, 1), + Keccak: make(map[string]struct{}), + config: getFullConfiguration(config), + }, nil +} + +func (t *erc7562Tracer) OnTxStart(env *tracing.VMContext, tx *types.Transaction, from common.Address) { + defer catchPanic() + t.env = env + t.gasLimit = tx.Gas() +} + +// OnEnter is called when EVM enters a new scope (via call, create or selfdestruct). +func (t *erc7562Tracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + defer catchPanic() + t.depth = depth + // Skip if tracing was interrupted + if t.interrupt.Load() { + return + } + + toCopy := to + call := callFrameWithOpcodes{ + Type: vm.OpCode(typ), + From: from, + To: &toCopy, + Input: common.CopyBytes(input), + Gas: gas, + Value: value, + AccessedSlots: accessedSlots{ + Reads: map[string][]string{}, + Writes: map[string]uint64{}, + TransientReads: map[string]uint64{}, + TransientWrites: map[string]uint64{}, + }, + UsedOpcodes: map[vm.OpCode]uint64{}, + ExtCodeAccessInfo: make([]common.Address, 0), + ContractSize: map[common.Address]*contractSizeWithOpcode{}, + } + if depth == 0 { + call.Gas = t.gasLimit + } + t.callstackWithOpcodes = append(t.callstackWithOpcodes, call) +} + +func (t *erc7562Tracer) captureEnd(output []byte, gasUsed uint64, err error, reverted bool) { + if len(t.callstackWithOpcodes) != 1 { + return + } + t.callstackWithOpcodes[0].processOutput(output, err, reverted) +} + +// OnExit is called when EVM exits a scope, even if the scope didn't +// execute any code. 
+func (t *erc7562Tracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + defer catchPanic() + if depth == 0 { + t.captureEnd(output, gasUsed, err, reverted) + return + } + + t.depth = depth - 1 + + size := len(t.callstackWithOpcodes) + if size <= 1 { + return + } + // Pop call. + call := t.callstackWithOpcodes[size-1] + t.callstackWithOpcodes = t.callstackWithOpcodes[:size-1] + size -= 1 + + if errors.Is(err, vm.ErrCodeStoreOutOfGas) || errors.Is(err, vm.ErrOutOfGas) { + call.OutOfGas = true + } + call.GasUsed = gasUsed + call.processOutput(output, err, reverted) + // Nest call into parent. + t.callstackWithOpcodes[size-1].Calls = append(t.callstackWithOpcodes[size-1].Calls, call) +} + +func (t *erc7562Tracer) OnTxEnd(receipt *types.Receipt, err error) { + defer catchPanic() + // Error happened during tx validation. + if err != nil { + return + } + t.callstackWithOpcodes[0].GasUsed = receipt.GasUsed + if t.config.WithLog { + // Logs are not emitted when the call fails + t.clearFailedLogs(&t.callstackWithOpcodes[0], false) + } +} + +func (t *erc7562Tracer) OnLog(log1 *types.Log) { + defer catchPanic() + // Only logs need to be captured via opcode processing + if !t.config.WithLog { + return + } + // Skip if tracing was interrupted + if t.interrupt.Load() { + return + } + l := callLog{ + Address: log1.Address, + Topics: log1.Topics, + Data: log1.Data, + Position: hexutil.Uint(len(t.callstackWithOpcodes[len(t.callstackWithOpcodes)-1].Calls)), + } + t.callstackWithOpcodes[len(t.callstackWithOpcodes)-1].Logs = append(t.callstackWithOpcodes[len(t.callstackWithOpcodes)-1].Logs, l) +} + +// GetResult returns the json-encoded nested list of call traces, and any +// error arising from the encoding or forceful termination (via `Stop`). 
+func (t *erc7562Tracer) GetResult() (json.RawMessage, error) { + defer catchPanic() + if len(t.callstackWithOpcodes) != 1 { + return nil, errors.New("incorrect number of top-level calls") + } + + callFrameJSON, err := json.Marshal(t.callstackWithOpcodes[0]) + + // Unmarshal the generated JSON into a map + var resultMap map[string]interface{} + if err := json.Unmarshal(callFrameJSON, &resultMap); err != nil { + return nil, err + } + + // Converting keccak mapping to array + keccakArray := make([]hexutil.Bytes, len(t.Keccak)) + i := 0 + for k := range t.Keccak { + keccakArray[i] = hexutil.Bytes(k) + i++ + } + resultMap["keccak"] = keccakArray + + // Marshal the final map back to JSON + finalJSON, err := json.Marshal(resultMap) + if err != nil { + return nil, err + } + return finalJSON, t.reason +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (t *erc7562Tracer) Stop(err error) { + defer catchPanic() + t.reason = err + t.interrupt.Store(true) +} + +// clearFailedLogs clears the logs of a callframe and all its children +// in case of execution failure. 
+func (t *erc7562Tracer) clearFailedLogs(cf *callFrameWithOpcodes, parentFailed bool) { + failed := cf.failed() || parentFailed + // Clear own logs + if failed { + cf.Logs = nil + } + for i := range cf.Calls { + t.clearFailedLogs(&cf.Calls[i], failed) + } +} + +func (t *erc7562Tracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + defer catchPanic() + opcode := vm.OpCode(op) + var opcodeWithStack *opcodeWithPartialStack + stackSize := len(scope.StackData()) + var stackTopItems []uint256.Int + for i := 0; i < t.config.StackTopItemsSize && i < stackSize; i++ { + stackTopItems = append(stackTopItems, *peepStack(scope.StackData(), i)) + } + opcodeWithStack = &opcodeWithPartialStack{ + Opcode: opcode, + StackTopItems: stackTopItems, + } + t.handleReturnRevert(opcode) + size := len(t.callstackWithOpcodes) + currentCallFrame := &t.callstackWithOpcodes[size-1] + if t.lastOpWithStack != nil { + t.handleExtOpcodes(opcode, currentCallFrame) + } + t.handleAccessedContractSize(opcode, scope, currentCallFrame) + if t.lastOpWithStack != nil { + t.handleGasObserved(opcode, currentCallFrame) + } + t.storeUsedOpcode(opcode, currentCallFrame) + t.handleStorageAccess(opcode, scope, currentCallFrame) + t.storeKeccak(opcode, scope) + t.lastOpWithStack = opcodeWithStack +} + +func (t *erc7562Tracer) handleReturnRevert(opcode vm.OpCode) { + if opcode == vm.REVERT || opcode == vm.RETURN { + t.lastOpWithStack = nil + } +} + +func (t *erc7562Tracer) handleGasObserved(opcode vm.OpCode, currentCallFrame *callFrameWithOpcodes) { + // [OP-012] + pendingGasObserved := t.lastOpWithStack.Opcode == vm.GAS && !isCall(opcode) + if pendingGasObserved { + incrementCount(currentCallFrame.UsedOpcodes, vm.GAS) + } +} + +func (t *erc7562Tracer) storeUsedOpcode(opcode vm.OpCode, currentCallFrame *callFrameWithOpcodes) { + // ignore "unimportant" opcodes + if opcode != vm.GAS && !t.isIgnoredOpcode(opcode) { + 
incrementCount(currentCallFrame.UsedOpcodes, opcode) + } +} + +func (t *erc7562Tracer) handleStorageAccess(opcode vm.OpCode, scope tracing.OpContext, currentCallFrame *callFrameWithOpcodes) { + if opcode == vm.SLOAD || opcode == vm.SSTORE || opcode == vm.TLOAD || opcode == vm.TSTORE { + slot := common.BytesToHash(peepStack(scope.StackData(), 0).Bytes()) + slotHex := slot.Hex() + addr := scope.Address() + + if opcode == vm.SLOAD { + // read slot values before this UserOp was created + // (so saving it if it was written before the first read) + _, rOk := currentCallFrame.AccessedSlots.Reads[slotHex] + _, wOk := currentCallFrame.AccessedSlots.Writes[slotHex] + if !rOk && !wOk { + currentCallFrame.AccessedSlots.Reads[slotHex] = append(currentCallFrame.AccessedSlots.Reads[slotHex], t.env.StateDB.GetState(addr, slot).Hex()) + } + } else if opcode == vm.SSTORE { + incrementCount(currentCallFrame.AccessedSlots.Writes, slotHex) + } else if opcode == vm.TLOAD { + incrementCount(currentCallFrame.AccessedSlots.TransientReads, slotHex) + } else { + incrementCount(currentCallFrame.AccessedSlots.TransientWrites, slotHex) + } + } +} + +func (t *erc7562Tracer) storeKeccak(opcode vm.OpCode, scope tracing.OpContext) { + if opcode == vm.KECCAK256 { + dataOffset := peepStack(scope.StackData(), 0).Uint64() + dataLength := peepStack(scope.StackData(), 1).Uint64() + memory := scope.MemoryData() + keccak := make([]byte, dataLength) + copy(keccak, memory[dataOffset:dataOffset+dataLength]) + t.Keccak[string(keccak)] = struct{}{} + } +} + +func (t *erc7562Tracer) handleExtOpcodes(opcode vm.OpCode, currentCallFrame *callFrameWithOpcodes) { + if isEXT(t.lastOpWithStack.Opcode) { + addr := common.HexToAddress(t.lastOpWithStack.StackTopItems[0].Hex()) + + // only store the last EXTCODE* opcode per address - could even be a boolean for our current use-case + // [OP-051] + + if !(t.lastOpWithStack.Opcode == vm.EXTCODESIZE && opcode == vm.ISZERO) { + currentCallFrame.ExtCodeAccessInfo = 
append(currentCallFrame.ExtCodeAccessInfo, addr) + } + } +} + +func (t *erc7562Tracer) handleAccessedContractSize(opcode vm.OpCode, scope tracing.OpContext, currentCallFrame *callFrameWithOpcodes) { + // [OP-041] + if isEXTorCALL(opcode) { + n := 0 + if !isEXT(opcode) { + n = 1 + } + addr := common.BytesToAddress(peepStack(scope.StackData(), n).Bytes()) + if _, ok := currentCallFrame.ContractSize[addr]; !ok && !isAllowedPrecompile(addr) { + currentCallFrame.ContractSize[addr] = &contractSizeWithOpcode{ + ContractSize: len(t.env.StateDB.GetCode(addr)), + Opcode: opcode, + } + } + } +} + +func peepStack(stackData []uint256.Int, n int) *uint256.Int { + return &stackData[len(stackData)-n-1] +} + +func isEXTorCALL(opcode vm.OpCode) bool { + return isEXT(opcode) || isCall(opcode) +} + +func isEXT(opcode vm.OpCode) bool { + return opcode == vm.EXTCODEHASH || + opcode == vm.EXTCODESIZE || + opcode == vm.EXTCODECOPY +} + +func isCall(opcode vm.OpCode) bool { + return opcode == vm.CALL || + opcode == vm.CALLCODE || + opcode == vm.DELEGATECALL || + opcode == vm.STATICCALL +} + +// Check if this opcode is ignored for the purposes of generating the used opcodes report +func (t *erc7562Tracer) isIgnoredOpcode(opcode vm.OpCode) bool { + if _, ok := t.ignoredOpcodes[opcode]; ok { + return true + } + return false +} + +func defaultIgnoredOpcodes() map[vm.OpCode]struct{} { + ignored := make(map[vm.OpCode]struct{}) + + // Allow all PUSHx, DUPx and SWAPx opcodes as they have sequential codes + for op := vm.PUSH0; op < vm.SWAP16; op++ { + ignored[op] = struct{}{} + } + + for _, op := range []vm.OpCode{ + vm.POP, vm.ADD, vm.SUB, vm.MUL, + vm.DIV, vm.EQ, vm.LT, vm.GT, + vm.SLT, vm.SGT, vm.SHL, vm.SHR, + vm.AND, vm.OR, vm.NOT, vm.ISZERO, + } { + ignored[op] = struct{}{} + } + + return ignored +} + +// not using 'isPrecompiled' to only allow the ones defined by the ERC-7562 as stateless precompiles +// [OP-062] +func isAllowedPrecompile(addr common.Address) bool { + addrInt := addr.Big() + 
// incrementCount bumps the counter stored under k, treating a missing key as
// zero (the map's zero value).
func incrementCount[K comparable](m map[K]uint64, k K) {
	m[k]++
}
enc.Output = c.Output + enc.Error = c.Error + enc.RevertReason = c.RevertReason + enc.Logs = c.Logs + enc.Value = (*hexutil.Big)(c.Value) + enc.AccessedSlots = c.AccessedSlots + enc.ExtCodeAccessInfo = c.ExtCodeAccessInfo + enc.UsedOpcodes = c.UsedOpcodes + enc.ContractSize = c.ContractSize + enc.OutOfGas = c.OutOfGas + enc.Calls = c.Calls + enc.TypeString = c.TypeString() + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (c *callFrameWithOpcodes) UnmarshalJSON(input []byte) error { + type callFrameWithOpcodes0 struct { + Type *vm.OpCode `json:"-"` + From *common.Address `json:"from"` + Gas *hexutil.Uint64 `json:"gas"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input *hexutil.Bytes `json:"input" rlp:"optional"` + Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"` + Error *string `json:"error,omitempty" rlp:"optional"` + RevertReason *string `json:"revertReason,omitempty"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` + AccessedSlots *accessedSlots `json:"accessedSlots"` + ExtCodeAccessInfo []common.Address `json:"extCodeAccessInfo"` + UsedOpcodes map[vm.OpCode]uint64 `json:"usedOpcodes"` + ContractSize map[common.Address]*contractSizeWithOpcode `json:"contractSize"` + OutOfGas *bool `json:"outOfGas"` + Calls []callFrameWithOpcodes `json:"calls,omitempty" rlp:"optional"` + } + var dec callFrameWithOpcodes0 + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Type != nil { + c.Type = *dec.Type + } + if dec.From != nil { + c.From = *dec.From + } + if dec.Gas != nil { + c.Gas = uint64(*dec.Gas) + } + if dec.GasUsed != nil { + c.GasUsed = uint64(*dec.GasUsed) + } + if dec.To != nil { + c.To = dec.To + } + if dec.Input != nil { + c.Input = *dec.Input + } + if dec.Output != nil { + c.Output = *dec.Output + } + if dec.Error != nil { + c.Error = *dec.Error + } + if 
dec.RevertReason != nil { + c.RevertReason = *dec.RevertReason + } + if dec.Logs != nil { + c.Logs = dec.Logs + } + if dec.Value != nil { + c.Value = (*big.Int)(dec.Value) + } + if dec.AccessedSlots != nil { + c.AccessedSlots = *dec.AccessedSlots + } + if dec.ExtCodeAccessInfo != nil { + c.ExtCodeAccessInfo = dec.ExtCodeAccessInfo + } + if dec.UsedOpcodes != nil { + c.UsedOpcodes = dec.UsedOpcodes + } + if dec.ContractSize != nil { + c.ContractSize = dec.ContractSize + } + if dec.OutOfGas != nil { + c.OutOfGas = *dec.OutOfGas + } + if dec.Calls != nil { + c.Calls = dec.Calls + } + return nil +}