diff --git a/eth/api_backend.go b/eth/api_backend.go index a9423a0ff9..44bb433f8d 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/eth/relay" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -54,6 +55,8 @@ type EthAPIBackend struct { allowUnprotectedTxs bool eth *Ethereum gpo *gasprice.Oracle + + relay *relay.RelayService } // ChainConfig returns the active chain configuration. @@ -481,6 +484,10 @@ func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transac return b.eth.txPool.ContentFrom(addr) } +func (b *EthAPIBackend) TxStatus(hash common.Hash) txpool.TxStatus { + return b.eth.txPool.Status(hash) +} + func (b *EthAPIBackend) TxPool() *txpool.TxPool { return b.eth.txPool } @@ -720,3 +727,40 @@ func (b *EthAPIBackend) RPCTxSyncDefaultTimeout() time.Duration { func (b *EthAPIBackend) RPCTxSyncMaxTimeout() time.Duration { return b.eth.config.TxSyncMaxTimeout } + +// Preconf / Private tx related API for relay +func (b *EthAPIBackend) PreconfEnabled() bool { + return b.relay.PreconfEnabled() +} +func (b *EthAPIBackend) SubmitTxForPreconf(tx *types.Transaction) error { + return b.relay.SubmitPreconfTransaction(tx) +} + +func (b *EthAPIBackend) CheckPreconfStatus(hash common.Hash) (bool, error) { + return b.relay.CheckPreconfStatus(hash) +} + +func (b *EthAPIBackend) PrivateTxEnabled() bool { + return b.relay.PrivateTxEnabled() +} + +func (b *EthAPIBackend) SubmitPrivateTx(tx *types.Transaction) error { + return b.relay.SubmitPrivateTransaction(tx) +} + +// Preconf / Private tx related API for block producers +func (b *EthAPIBackend) AcceptPreconfTxs() bool { + return b.relay.AcceptPreconfTxs() +} + +func (b *EthAPIBackend) AcceptPrivateTxs() bool { + return b.relay.AcceptPrivateTxs() +} + +func (b *EthAPIBackend) RecordPrivateTx(hash common.Hash) { + b.relay.RecordPrivateTx(hash) +} + +func (b *EthAPIBackend) PurgePrivateTx(hash common.Hash) { + b.relay.PurgePrivateTx(hash) +} diff --git a/eth/backend.go b/eth/backend.go index be0f82ba94..20a574c51c 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -54,6 +54,7 @@ import ( "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/wit" + "github.com/ethereum/go-ethereum/eth/relay" "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -228,14 +229,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { closeCh: make(chan struct{}), } + relayService := relay.Init(config.EnablePreconfs, config.EnablePrivateTx, config.AcceptPreconfTx, config.AcceptPrivateTx, config.BlockProducerRpcEndpoints) + privateTxGetter := relayService.GetPrivateTxGetter() + // START: Bor changes - eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} + eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil, relayService} if eth.APIBackend.allowUnprotectedTxs { - log.Info("------Unprotected transactions allowed-------") + log.Info("Unprotected transactions allowed") config.TxPool.AllowUnprotectedTxs = true } - gpoParams := config.GPO + // Set transaction getter for relay service 
to query local database + relayService.SetTxGetter(eth.APIBackend.GetCanonicalTransaction) blockChainAPI := ethapi.NewBlockChainAPI(eth.APIBackend) engine, err := ethconfig.CreateConsensusEngine(config.Genesis.Config, config, chainDb, blockChainAPI) @@ -327,6 +332,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { eth.blockchain, err = core.NewBlockChain(chainDb, config.Genesis, eth.engine, options) } + // Set the chain head event subscription function for private tx store + relayService.SetchainEventSubFn(eth.blockchain.SubscribeChainEvent) + // Set parallel stateless import toggle on blockchain if err == nil && eth.blockchain != nil && config.EnableParallelStatelessImport { eth.blockchain.ParallelStatelessImportEnable() @@ -342,18 +350,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Set blockchain reference for fork detection in whitelist service checker.SetBlockchain(eth.blockchain) - // 1.14.8: NewOracle function definition was changed to accept (startPrice *big.Int) param. - eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams, config.Miner.GasPrice) - - // bor: this is nor present in geth - /* - _ = eth.engine.VerifyHeader(eth.blockchain, eth.blockchain.CurrentHeader()) // TODO think on it - */ - - // BOR changes - eth.APIBackend.gpo.ProcessCache() - // BOR changes - // Initialize filtermaps log index. fmConfig := filtermaps.Config{ History: config.LogHistory, @@ -423,11 +419,13 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { checker: checker, enableBlockTracking: eth.config.EnableBlockTracking, txAnnouncementOnly: eth.p2pServer.TxAnnouncementOnly, + disableTxPropagation: eth.p2pServer.DisableTxPropagation, witnessProtocol: eth.config.WitnessProtocol, syncWithWitnesses: eth.config.SyncWithWitnesses, syncAndProduceWitnesses: eth.config.SyncAndProduceWitnesses, fastForwardThreshold: config.FastForwardThreshold, p2pServer: eth.p2pServer, + privateTxGetter: privateTxGetter, }); err != nil { return nil, err } @@ -440,12 +438,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { eth.miner.SetPrioAddresses(config.TxPool.Locals) } - eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} - if eth.APIBackend.allowUnprotectedTxs { - log.Info("Unprotected transactions allowed") - } // 1.14.8: NewOracle function definition was changed to accept (startPrice *big.Int) param. eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, config.GPO, config.Miner.GasPrice) + eth.APIBackend.gpo.ProcessCache() // Start the RPC service eth.netRPCService = ethapi.NewNetAPI(eth.p2pServer, config.NetworkId) @@ -1010,6 +1005,11 @@ func (s *Ethereum) Stop() error { // Stop all the peer-related stuff first. 
s.discmix.Close() + // Close the tx relay service if enabled + if s.APIBackend.relay != nil { + s.APIBackend.relay.Close() + } + // Close the engine before handler else it may cause a deadlock where // the heimdall is unresponsive and the syncing loop keeps waiting // for a response and is unable to proceed to exit `Finalize` during diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 3dd06f150d..9a0ae699fb 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -288,6 +288,15 @@ type Config struct { // EIP-7966: eth_sendRawTransactionSync timeouts TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"` + + // Preconf / Private transaction relay related settings + EnablePreconfs bool + EnablePrivateTx bool + BlockProducerRpcEndpoints []string + + // Preconf / Private transaction related settings for block producers + AcceptPreconfTx bool + AcceptPrivateTx bool } // CreateConsensusEngine creates a consensus engine for the given chain configuration. diff --git a/eth/handler.go b/eth/handler.go index 1779caafd9..9aca5bda9f 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -44,6 +44,7 @@ import ( "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/wit" + "github.com/ethereum/go-ethereum/eth/relay" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -120,12 +121,14 @@ type handlerConfig struct { EthAPI *ethapi.BlockChainAPI // EthAPI to interact enableBlockTracking bool // Whether to log information collected while tracking block lifecycle txAnnouncementOnly bool // Whether to only announce txs to peers + disableTxPropagation bool // Whether to disable broadcasting and announcement of txs to peers witnessProtocol bool // Whether to enable witness protocol syncWithWitnesses bool // Whether to sync blocks with witnesses syncAndProduceWitnesses bool // Whether to sync blocks and produce witnesses simultaneously fastForwardThreshold uint64 // Minimum necessary distance between local header and peer to fast forward gasCeil uint64 // Gas ceiling for dynamic witness page threshold calculation p2pServer *p2p.Server // P2P server for jailing peers + privateTxGetter relay.PrivateTxGetter // privateTxGetter to check if a transaction needs to be treated as private or not } type handler struct { @@ -150,6 +153,9 @@ type handler struct { ethAPI *ethapi.BlockChainAPI // EthAPI to interact + // privateTxGetter to check if a transaction needs to be treated as private or not + privateTxGetter relay.PrivateTxGetter + eventMux *event.TypeMux txsCh chan core.NewTxsEvent txsSub event.Subscription @@ -160,9 +166,11 @@ type handler struct { requiredBlocks map[uint64]common.Hash - enableBlockTracking bool - txAnnouncementOnly bool - p2pServer *p2p.Server // P2P server for jailing peers + enableBlockTracking bool + txAnnouncementOnly bool + disableTxPropagation bool + + p2pServer *p2p.Server // P2P server for jailing peers // Witness protocol related fields syncWithWitnesses bool @@ -199,12 +207,14 @@ func newHandler(config *handlerConfig) (*handler, error) { requiredBlocks: config.RequiredBlocks, enableBlockTracking: config.enableBlockTracking, txAnnouncementOnly: config.txAnnouncementOnly, + disableTxPropagation: config.disableTxPropagation, p2pServer: config.p2pServer, quitSync: make(chan struct{}), handlerDoneCh: make(chan struct{}), handlerStartCh: 
make(chan struct{}), syncWithWitnesses: config.syncWithWitnesses, syncAndProduceWitnesses: config.syncAndProduceWitnesses, + privateTxGetter: config.privateTxGetter, } log.Info("Sync with witnesses", "enabled", config.syncWithWitnesses) @@ -423,9 +433,12 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { } h.chainSync.handlePeerEvent() - // Propagate existing transactions. new transactions appearing - // after this will be sent via broadcasts. - h.syncTransactions(peer) + // Bor: skip propagating transactions if flag is set + if !h.disableTxPropagation { + // Propagate existing transactions. new transactions appearing + // after this will be sent via broadcasts. + h.syncTransactions(peer) + } // Create a notification channel for pending requests if the peer goes down dead := make(chan struct{}) @@ -579,17 +592,27 @@ func (h *handler) unregisterPeer(id string) { func (h *handler) Start(maxPeers int) { h.maxPeers = maxPeers - // broadcast and announce transactions (only new ones, not resurrected ones) - h.wg.Add(1) - h.txsCh = make(chan core.NewTxsEvent, txChanSize) - h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) - go h.txBroadcastLoop() + if h.disableTxPropagation { + log.Info("Disabling transaction propagation completely") + } + + // Bor: block producers can choose to not propagate transactions to save p2p overhead + // broadcast and announce transactions (only new ones, not resurrected ones) only + // if transaction propagation is enabled + if !h.disableTxPropagation { + h.wg.Add(1) + h.txsCh = make(chan core.NewTxsEvent, txChanSize) + h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) + go h.txBroadcastLoop() + } // rebroadcast stuck transactions - h.wg.Add(1) - h.stuckTxsCh = make(chan core.StuckTxsEvent, txChanSize) - h.stuckTxsSub = h.txpool.SubscribeRebroadcastTransactions(h.stuckTxsCh) - go h.stuckTxBroadcastLoop() + if !h.disableTxPropagation { + h.wg.Add(1) + h.stuckTxsCh = make(chan core.StuckTxsEvent, txChanSize) + h.stuckTxsSub = h.txpool.SubscribeRebroadcastTransactions(h.stuckTxsCh) + go h.stuckTxBroadcastLoop() + } // broadcast mined blocks h.wg.Add(1) @@ -610,8 +633,12 @@ func (h *handler) Start(maxPeers int) { } func (h *handler) Stop() { - h.txsSub.Unsubscribe() // quits txBroadcastLoop - h.stuckTxsSub.Unsubscribe() // quits stuckTxBroadcastLoop + if h.txsSub != nil { + h.txsSub.Unsubscribe() // quits txBroadcastLoop + } + if h.stuckTxsSub != nil { + h.stuckTxsSub.Unsubscribe() // quits stuckTxBroadcastLoop + } h.minedBlockSub.Unsubscribe() h.blockRange.stop() @@ -736,6 +763,11 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { ) for _, tx := range txs { + // Skip gossip if transaction is marked as private + if h.privateTxGetter != nil && h.privateTxGetter.IsTxPrivate(tx.Hash()) { + log.Debug("[tx-relay] skip tx broadcast for private tx", "hash", tx.Hash()) + continue + } var directSet map[*ethPeer]struct{} switch { case tx.Type() == types.BlobTxType: diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 2693453884..82ece38236 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -20,6 +20,8 @@ import ( "fmt" "math/big" "reflect" + "sync" + "sync/atomic" "testing" "time" "unsafe" @@ -37,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" ) // testEthHandler is a mock event handler to listen for inbound network requests @@ -708,6 +711,265 @@ func 
testBroadcastMalformedBlock(t *testing.T, protocol uint) { } } +// Tests that when tx propagation is completely disabled, no transactions +// are propagated to connected peers. +func TestDisableTxPropagation68(t *testing.T) { + testDisableTxPropagation(t, eth.ETH68) +} +func TestDisableTxPropagation69(t *testing.T) { + testDisableTxPropagation(t, eth.ETH69) +} + +func testDisableTxPropagation(t *testing.T, protocol uint) { + t.Parallel() + + // Disable tx propagation on the source handler + updateConfig := func(cfg *handlerConfig) *handlerConfig { + cfg.disableTxPropagation = true + return cfg + } + + // Create a source handler to send transactions from and a number of sinks + // to receive them. We need multiple sinks to ensure none of them gets + // any transactions. + source := newTestHandlerWithConfig(updateConfig) + source.handler.snapSync.Store(false) // Avoid requiring snap, otherwise some will be dropped below + defer source.close() + + // Fill the source pool with transactions + txs := make([]*types.Transaction, 10) + for nonce := range txs { + tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + txs[nonce] = tx + } + source.txpool.Add(txs[:5], false) + + sinks := make([]*testHandler, 10) + for i := 0; i < len(sinks); i++ { + sinks[i] = newTestHandler() + defer sinks[i].close() + + sinks[i].handler.synced.Store(true) // mark synced to accept transactions + } + + // Interconnect all the sink handlers with the source handler + for i, sink := range sinks { + sourcePipe, sinkPipe := p2p.MsgPipe() + defer sourcePipe.Close() + defer sinkPipe.Close() + + sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool) + sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) + defer sourcePeer.Close() + defer sinkPeer.Close() + + go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(sink.handler), peer) + }) + } + + // Subscribe to all the transaction pools + txChs := make([]chan core.NewTxsEvent, len(sinks)) + for i := 0; i < len(sinks); i++ { + txChs[i] = make(chan core.NewTxsEvent, 10) + + sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) + defer sub.Unsubscribe() + } + + var wg sync.WaitGroup + + // Transactions are propagated during initial sync via `runEthPeer`. As the + // source has disabled tx propagation, ensure that none of the sinks receive + // any transactions. + for i := range sinks { + wg.Add(1) + go func(idx int) { + defer wg.Done() + select { + case <-txChs[idx]: + t.Errorf("sink %d: received transactions even when tx propagation is completely disabled", idx) + case <-time.After(2 * time.Second): + // Expected: timeout without receiving any transactions + } + }(i) + } + wg.Wait() + + // Transactions are also propagated via the broadcast and announcement loops. Ensure that + // none of them receive any transactions.
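+ // With propagation disabled, handler.Start never launches txBroadcastLoop (which drives both + // direct broadcasts and hash announcements), so these late additions must not surface as + // NewTxsEvent on any sink either.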
+ source.txpool.Add(txs[5:], false) + for i := range sinks { + wg.Add(1) + go func(idx int) { + defer wg.Done() + select { + case <-txChs[idx]: + t.Errorf("sink %d: received transactions even when tx propagation is completely disabled", idx) + case <-time.After(2 * time.Second): + // Expected: timeout without receiving any transactions + } + }(i) + } + wg.Wait() +} + +// A simple private tx store for tests +type PrivateTxStore struct { + mu sync.RWMutex + store map[common.Hash]struct{} +} + +func (p *PrivateTxStore) IsTxPrivate(hash common.Hash) bool { + p.mu.RLock() + defer p.mu.RUnlock() + _, ok := p.store[hash] + return ok +} + +// Tests that when a tx is set to private, it's not propagated to any +// connected peers. +func TestPrivateTxNotPropagated68(t *testing.T) { + testPrivateTxNotPropagated(t, eth.ETH68) +} +func TestPrivateTxNotPropagated69(t *testing.T) { + testPrivateTxNotPropagated(t, eth.ETH69) +} + +func testPrivateTxNotPropagated(t *testing.T, protocol uint) { + t.Parallel() + + // Initialize a private tx store + privateTxStore := &PrivateTxStore{store: make(map[common.Hash]struct{})} + + // Set the private tx store getter on the source handler + updateConfig := func(cfg *handlerConfig) *handlerConfig { + cfg.privateTxGetter = privateTxStore + return cfg + } + + // Create a source handler to send transactions from and a number of sinks + // to receive them. We need multiple sinks to ensure none of them gets + // any transactions. + source := newTestHandlerWithConfig(updateConfig) + source.handler.snapSync.Store(false) // Avoid requiring snap, otherwise some will be dropped below + defer source.close() + + // Fill the source pool with transactions + txs := make([]*types.Transaction, 10) + for nonce := range txs { + tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + txs[nonce] = tx + } + source.txpool.Add(txs[:5], false) + + // Mark some transactions as private + privateTxStore.mu.Lock() + privateTxStore.store[txs[3].Hash()] = struct{}{} + privateTxStore.store[txs[4].Hash()] = struct{}{} + privateTxStore.mu.Unlock() + + sinks := make([]*testHandler, 10) + for i := 0; i < len(sinks); i++ { + sinks[i] = newTestHandler() + defer sinks[i].close() + + sinks[i].handler.synced.Store(true) // mark synced to accept transactions + } + + // Interconnect all the sink handlers with the source handler + for i, sink := range sinks { + sourcePipe, sinkPipe := p2p.MsgPipe() + defer sourcePipe.Close() + defer sinkPipe.Close() + + sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool) + sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) + defer sourcePeer.Close() + defer sinkPeer.Close() + + go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(sink.handler), peer) + }) + } + + // Subscribe to all the transaction pools + txChs := make([]chan core.NewTxsEvent, len(sinks)) + for i := 0; i < len(sinks); i++ { + txChs[i] = make(chan core.NewTxsEvent, 10) + + sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) + defer sub.Unsubscribe() + } + + var wg sync.WaitGroup + + // Transactions are propagated during initial sync via `runEthPeer`. 
Since the + // source marks some of the transactions as private, ensure that the sinks + // receive only the public ones and never the private ones. + var txReceivedCount atomic.Uint64 + for i := range sinks { + wg.Add(1) + go func(idx int) { + defer wg.Done() + select { + case txs := <-txChs[idx]: + txReceivedCount.Add(uint64(len(txs.Txs))) + for _, tx := range txs.Txs { + // Ensure no private txs are received + if _, ok := privateTxStore.store[tx.Hash()]; ok { + t.Errorf("sink %d: received private transaction %x", idx, tx.Hash()) + } + } + case <-time.After(2 * time.Second): + t.Errorf("sink %d: transaction propagation timed out", idx) + } + }(i) + } + wg.Wait() + require.Equal(t, uint64(len(sinks)*3), txReceivedCount.Load(), "sinks should have received only public transactions") + + // Transactions are also propagated via the broadcast and announcement loops. Ensure that + // the sinks again receive only the public transactions. + source.txpool.Add(txs[5:], false) + + // Mark some transactions as private + privateTxStore.mu.Lock() + privateTxStore.store[txs[8].Hash()] = struct{}{} + privateTxStore.store[txs[9].Hash()] = struct{}{} + privateTxStore.mu.Unlock() + + txReceivedCount.Store(0) + for i := range sinks { + wg.Add(1) + go func(idx int) { + defer wg.Done() + select { + case txs := <-txChs[idx]: + txReceivedCount.Add(uint64(len(txs.Txs))) + for _, tx := range txs.Txs { + // Ensure no private txs are received + if _, ok := privateTxStore.store[tx.Hash()]; ok { + t.Errorf("sink %d: received private transaction %x", idx, tx.Hash()) + } + } + case <-time.After(2 * time.Second): + t.Errorf("sink %d: transaction propagation timed out", idx) + } + }(i) + } + wg.Wait() + require.Equal(t, uint64(len(sinks)*3), txReceivedCount.Load(), "sinks should have received only public transactions") +} + // TestCreateWitnessRequester tests the createWitnessRequester helper func TestCreateWitnessRequester(t *testing.T) { handler := newTestHandler() diff --git a/eth/handler_test.go b/eth/handler_test.go index 072b887574..203b637a89 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -230,6 +230,44 @@ func newTestHandlerWithBlocks(blocks int) *testHandler { } } +func newTestHandlerWithConfig(updateConfig func(*handlerConfig) *handlerConfig) *testHandler { + // Create a database pre-initialized with a genesis block + db := rawdb.NewMemoryDatabase() + gspec := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + } + chain, _ := core.NewBlockChain(db, gspec, ethash.NewFaker(), nil) + + _, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 0, nil) + if _, err := chain.InsertChain(bs, false); err != nil { + panic(err) + } + + txpool := newTestTxPool() + + config := &handlerConfig{ + Database: db, + Chain: chain, + TxPool: txpool, + Network: 1, + Sync: downloader.SnapSync, + BloomCache: 1, + } + if updateConfig != nil { + config = updateConfig(config) + } + handler, _ := newHandler(config) + handler.Start(1000) + + return &testHandler{ + db: db, + chain: chain, + txpool: txpool, + handler: handler, + } +} + // close tears down the handler and all its internal constructs.
func (b *testHandler) close() { b.handler.Stop() diff --git a/eth/relay/CLAUDE.md b/eth/relay/CLAUDE.md new file mode 100644 index 0000000000..8c7fa14e0d --- /dev/null +++ b/eth/relay/CLAUDE.md @@ -0,0 +1,198 @@ +# Relay Service - Transaction Preconfirmation and Private Transaction Relay + +## Overview + +The relay service provides infrastructure for submitting transactions to block producers with two key features: +1. **Preconfirmation (Preconf)**: Submit transactions to block producers and get acknowledgment that they will be included in upcoming blocks +2. **Private Transactions**: Submit transactions privately to block producers without broadcasting to the public mempool + +This service acts as a relay between the node and multiple block producer RPC endpoints, handling parallel submissions, retries, status tracking, and caching. + +## Architecture + +### Core Components + +``` +eth/relay/ +├── relay.go # Main service wrapper and public API +├── service.go # Core service with task queue and cache +├── multiclient.go # Multi-RPC client for parallel submissions +├── private_tx_store.go # Private transaction tracking store +└── *_test.go # Test suite +``` + +### Package Structure + +**relay.go** +- `RelayService`: Main service wrapper that coordinates all relay functionality +- Public API: `SubmitPreconfTransaction()`, `CheckPreconfStatus()`, `SubmitPrivateTransaction()` +- Configuration: Manages enable/accept flags for preconf and private tx features + +**service.go** +- `Service`: Core service managing task queue, semaphore, and cache +- Task processing pipeline: Queue → Rate limiting → Submission → Cache update +- `TxGetter`: Function type for querying local database for included transactions +- `updateTaskInCache()`: Helper for safe cache updates preventing status downgrades + +**multiclient.go** +- `multiClient`: Manages multiple RPC connections to block producers +- Parallel submission logic with atomic counters +- Retry mechanism for private transaction failures +- "Already known" error detection and handling + +**private_tx_store.go** +- `PrivateTxStore`: Tracks private transactions with automatic cleanup +- Chain event subscription for detecting included transactions + +## Key Features + +### 1. Preconfirmation System + +**Flow:** +- User submits transaction, it gets queued for processing +- Service submits to all block producers and updates cache +- On status check, service first checks cache (returns immediately if preconfirmed) +- If not in cache, checks local database for inclusion +- If not found locally, queries block producers and updates cache + +**Key Behaviors:** +- **Consensus Requirement**: Preconf is only offered if ALL block producers acknowledge the transaction +- **Status Persistence**: Once `preconfirmed=true`, it never reverts to false +- **Cache-First**: Always checks cache before making external queries +- **Local DB Priority**: Checks local database for included transactions before querying block producers + +### 2. 
Private Transaction Submission + +**Flow:** +- Transaction submitted in parallel to all block producers +- Failed submissions are retried in background +- During retry, service checks local database for inclusion +- If found in local database, retry stops +- If not found, continues retrying up to maximum retry limit + +**Key Behaviors:** +- **Parallel Submission**: All block producers receive the transaction simultaneously +- **Best Effort**: Returns success if at least one block producer accepts +- **Background Retry**: Failed submissions are retried in background (max 5 retries, 2s interval) +- **Inclusion Detection**: Uses `TxGetter` to check if transaction was included in a block (stops retry) +- **Already Known Handling**: "already known" errors are treated as successful submissions + +### 3. Already Known Error Handling + +**Concept:** +When submitting a transaction, block producers may return "already known" error if they already have the transaction in their mempool. This should NOT be treated as a failure. + +**Behavior:** +- **Preconf Submission**: "already known" counts as preconfirmed for that block producer +- **Private Tx Submission**: "already known" counts as successful submission +- **Retry Logic**: "already known" during retry stops retry for that block producer + +### 4. Transaction Getter (TxGetter) + +**Purpose:** +Check local database for transaction inclusion BEFORE querying block producers via RPC. + +**Usage:** +- **CheckTxPreconfStatus**: Checks local DB first, returns preconf=true if found +- **Retry Logic**: Stops retrying private tx if found in local DB +- **Performance**: Avoids unnecessary RPC calls for included transactions +- **Reliability**: Local DB is authoritative source for inclusion status + +**Integration:** +- Set via `SetTxGetter()` after APIBackend is initialized +- Uses `eth.APIBackend.GetCanonicalTransaction` in production + +## Configuration + +### ServiceConfig + +**Fields:** +- `expiryTickerInterval`: How often to run cleanup (default: 1 minute) +- `expiryInterval`: How long to keep tasks in cache (default: 10 minutes) +- `maxQueuedTasks`: Task queue buffer size (default: 40,000) +- `maxConcurrentTasks`: Semaphore limit for parallel processing (default: 1,024) + +### Initialization + +Service is initialized in `eth/backend.go` with: +- Enable/accept flags for preconf and private tx features +- List of block producer RPC URLs +- Transaction getter is set after APIBackend initialization + +## Rate Limiting & Concurrency + +### Task Queue +- Buffered channel with configurable size +- Non-blocking until full +- FIFO processing order + +### Semaphore +- Limits concurrent task processing +- Prevents overwhelming block producers with parallel requests + +### Cleanup +- Periodic ticker deletes expired tasks from cache +- Prevents unbounded cache growth + +## Testing + +### Test Coverage +- **service_test.go**: Task queue, cache, preconf status, concurrent cache updates +- **multiclient_test.go**: Parallel submissions, retries, already known handling +- **private_tx_store_test.go**: Private tx tracking and cleanup + +### Key Test Scenarios +1. **Already Known**: Proper handling as success for both preconf and private tx +2. **Retry Logic**: Background retries with inclusion detection via TxGetter +3. **Cache Updates**: Preconfirmed status preservation across concurrent updates +4. **Queue Overflow**: Burst submissions exceeding queue capacity +5. **RPC Failures**: Timeouts and server failures +6. 
**TxGetter Integration**: Local DB checks and fallback to RPC + +### Mock Infrastructure +- `mockRpcServer`: HTTP test server simulating block producer RPC +- Configurable handlers for different RPC methods +- Support for error injection and timeouts + +## Performance Considerations + +### Optimizations +1. **Cache-First**: Reduces RPC load by serving cached results +2. **Local DB Check**: Avoids RPC calls for included transactions +3. **Parallel Submissions**: Submits to all BPs concurrently +4. **Background Retry**: Non-blocking retry mechanism +5. **Atomic Operations**: Lock-free counters for high-concurrency scenarios + +### Bottlenecks +1. **Queue Size**: Limited by `maxQueuedTasks` +2. **Semaphore**: Limited by `maxConcurrentTasks` +3. **RPC Timeout**: 2 second timeout per RPC call +4. **Cache Growth**: Mitigated by periodic cleanup + +## Security Considerations + +1. **Private Transactions**: Only submitted to trusted block producer endpoints +2. **No Broadcast**: Private txs never hit public mempool +3. **Rate Limiting**: Semaphore prevents DoS via task queue +4. **Queue Overflow**: Drops tasks when queue is full (no unbounded memory growth) + +## Future Enhancements + +Potential areas for improvement: +1. **Partial Preconf**: Offer preconf if majority (not all) BPs confirm +2. **Dynamic Retry**: Adjust retry interval based on network conditions +3. **BP Health**: Track block producer health and skip unhealthy ones +4. **Graceful Degradation**: Continue operation with partial BP availability + +## References + +**RPC Methods:** +- `eth_sendRawTransactionForPreconf`: Submit tx for preconfirmation +- `eth_sendRawTransactionPrivate`: Submit private transaction +- `txpool_txStatus`: Check transaction status in block producer pool + +**Related Packages:** +- `core/types`: Transaction types +- `core/txpool`: Transaction pool status constants +- `rpc`: RPC client for block producer communication +- `eth`: Integration point (backend.go) diff --git a/eth/relay/multiclient.go b/eth/relay/multiclient.go new file mode 100644 index 0000000000..9c6ed643c9 --- /dev/null +++ b/eth/relay/multiclient.go @@ -0,0 +1,328 @@ +package relay + +import ( + "context" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rpc" +) + +const ( + rpcTimeout = 2 * time.Second + privateTxRetryInterval = 2 * time.Second + privateTxMaxRetries = 5 +) + +var ( + rpcCallsSuccessMeter = metrics.NewRegisteredMeter("preconfs/rpc/success", nil) + rpcCallsFailureMeter = metrics.NewRegisteredMeter("preconfs/rpc/failure", nil) + rpcErrorInPreconfMeter = metrics.NewRegisteredMeter("preconfs/rpcerror", nil) + belowThresholdPreconfMeter = metrics.NewRegisteredMeter("preconfs/belowthreshold", nil) + alreadyKnownErrMeter = metrics.NewRegisteredMeter("relay/txalreadyknown", nil) +) + +// isAlreadyKnownError checks if the error indicates the transaction is already known to the node +func isAlreadyKnownError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "already known") +} + +// multiClient holds multiple rpc client instances for each block producer +// to perform certain queries across all of them and make a unified decision. 
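+// A preconfirmation is only offered when every dialed block producer acknowledges the +// transaction; a single rejection or RPC failure makes the combined result false.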
+type multiClient struct { + clients []*rpc.Client // rpc client instances dialed to each block producer +} + +func newMultiClient(urls []string) *multiClient { + if len(urls) == 0 { + log.Warn("[tx-relay] No block producer URLs provided") + return nil + } + + clients := make([]*rpc.Client, 0, len(urls)) + failed := 0 + for i, url := range urls { + // We use the rpc dialer for primarily 2 reasons: + // 1. It supports automatic reconnection when connection is lost + // 2. It allows us to do rpc queries which aren't directly available in ethclient (like txpool_contentFrom) + client, err := rpc.Dial(url) + if err != nil { + failed++ + log.Warn("[tx-relay] Failed to dial rpc endpoint, skipping", "url", url, "index", i, "err", err) + continue + } + + // Test connection with a simple call + var blockNumber string + ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout) + err = client.CallContext(ctx, &blockNumber, "eth_blockNumber") + cancel() + if err != nil { + client.Close() + failed++ + log.Warn("[tx-relay] Failed to fetch latest block number, skipping", "url", url, "index", i, "err", err) + continue + } + + number, err := hexutil.DecodeUint64(blockNumber) + if err != nil { + client.Close() + failed++ + log.Warn("[tx-relay] Failed to decode latest block number, skipping", "url", url, "index", i, "err", err) + continue + } + + log.Info("[tx-relay] Dial successful", "blockNumber", number, "index", i) + clients = append(clients, client) + } + + if failed == len(urls) { + log.Info("[tx-relay] Failed to dial all rpc endpoints, disabling completely", "count", len(urls)) + return nil + } + + log.Info("[tx-relay] Initialised rpc client for each block producer", "success", len(clients), "failed", failed) + return &multiClient{ + clients: clients, + } +} + +type SendTxForPreconfResponse struct { + TxHash common.Hash `json:"hash"` + Preconfirmed bool `json:"preconfirmed"` +} + +func (mc *multiClient) submitPreconfTx(rawTx []byte) (bool, error) { + // Submit tx to all block producers in parallel + var lastErr error + var preconfOfferedCount atomic.Uint64 + var wg sync.WaitGroup + for i, client := range mc.clients { + wg.Add(1) + go func(client *rpc.Client, index int) { + defer wg.Done() + + var preconfResponse SendTxForPreconfResponse + ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout) + err := client.CallContext(ctx, &preconfResponse, "eth_sendRawTransactionForPreconf", hexutil.Encode(rawTx)) + cancel() + if err != nil { + rpcCallsFailureMeter.Mark(1) + // If the tx is already known, treat it as preconfirmed for this node + if isAlreadyKnownError(err) { + alreadyKnownErrMeter.Mark(1) + preconfOfferedCount.Add(1) + return + } + lastErr = err + return + } + rpcCallsSuccessMeter.Mark(1) + if preconfResponse.Preconfirmed { + preconfOfferedCount.Add(1) + } + }(client, i) + } + wg.Wait() + + // Note: this can be improved later to only check for current block producer instead of all + // Only offer a preconf if the tx was accepted by all block producers + if preconfOfferedCount.Load() == uint64(len(mc.clients)) { + return true, nil + } + + if lastErr != nil { + rpcErrorInPreconfMeter.Mark(1) + } else { + belowThresholdPreconfMeter.Mark(1) + } + + return false, lastErr +} + +func (mc *multiClient) submitPrivateTx(rawTx []byte, hash common.Hash, retry bool, txGetter TxGetter) error { + // Submit tx to all block producers in parallel (initial attempt) + hexTx := hexutil.Encode(rawTx) + + var lastErr error + var wg sync.WaitGroup + var mu sync.Mutex + + failedIndices := 
make([]int, 0) + successfulIndices := make([]int, 0) + + for i, client := range mc.clients { + wg.Add(1) + go func(client *rpc.Client, index int) { + defer wg.Done() + + var txHash common.Hash + ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout) + err := client.CallContext(ctx, &txHash, "eth_sendRawTransactionPrivate", hexTx) + cancel() + + mu.Lock() + defer mu.Unlock() + if err != nil { + rpcCallsFailureMeter.Mark(1) + // If the tx is already known, treat it as successful submission + if isAlreadyKnownError(err) { + alreadyKnownErrMeter.Mark(1) + successfulIndices = append(successfulIndices, index) + return + } + lastErr = err + failedIndices = append(failedIndices, index) + log.Debug("[tx-relay] Failed to submit private tx (initial attempt)", "err", err, "producer", index, "hash", hash) + } else { + rpcCallsSuccessMeter.Mark(1) + successfulIndices = append(successfulIndices, index) + } + }(client, i) + } + wg.Wait() + + // If all submissions successful, return immediately + if len(failedIndices) == 0 { + log.Debug("[tx-relay] Successfully submitted private tx to all producers", "hash", hash) + return nil + } + + // Some submissions failed, start background retry + log.Debug("[tx-relay] Failed to submit private tx to one or more block producers, starting retry", + "err", lastErr, "failed", len(failedIndices), "successful", len(successfulIndices), "total", len(mc.clients), "hash", hash) + + if retry { + go mc.retryPrivateTxSubmission(hexTx, hash, failedIndices, txGetter) + } + + return lastErr +} + +// retryPrivateTxSubmission runs in background to retry private tx submission to producers +// that failed initially. It uses local txGetter to check if tx was included in a block. +func (mc *multiClient) retryPrivateTxSubmission(hexTx string, hash common.Hash, failedIndices []int, txGetter TxGetter) { + currentFailedIndices := failedIndices + + for retry := 0; retry < privateTxMaxRetries; retry++ { + // If no more failed producers, we're done + if len(currentFailedIndices) == 0 { + return + } + + // Sleep before retry + time.Sleep(privateTxRetryInterval) + + log.Debug("[tx-relay] Retrying private tx submission", "producers", len(currentFailedIndices), "attempt", retry+1, "hash", hash) + + // Check if tx was already included in a block in local db. If yes, skip + // retrying submission altogether. 
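+ // The local chain data is authoritative for inclusion, so once the tx is found there is + // nothing to gain from submitting it to the remaining producers.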
+ if txGetter != nil { + found, tx, _, _, _ := txGetter(hash) + if found && tx != nil { + log.Debug("[tx-relay] Transaction found in local database, stopping retry", "hash", hash) + return + } + } + + // Retry submission for failed producers + var retryWg sync.WaitGroup + var mu sync.Mutex + newFailedIndices := make([]int, 0) + + for _, index := range currentFailedIndices { + retryWg.Add(1) + go func(client *rpc.Client, idx int) { + defer retryWg.Done() + + var txHash common.Hash + ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout) + err := client.CallContext(ctx, &txHash, "eth_sendRawTransactionPrivate", hexTx) + cancel() + + if err != nil { + rpcCallsFailureMeter.Mark(1) + // If the tx is already known, treat it as successful submission + if isAlreadyKnownError(err) { + alreadyKnownErrMeter.Mark(1) + return + } + mu.Lock() + newFailedIndices = append(newFailedIndices, idx) + mu.Unlock() + } else { + rpcCallsSuccessMeter.Mark(1) + } + }(mc.clients[index], index) + } + retryWg.Wait() + + // Update failed indices for next iteration + currentFailedIndices = newFailedIndices + } + + if len(currentFailedIndices) > 0 { + log.Debug("[tx-relay] Finished retry attempts with some producers still failing", + "hash", hash, "failed", len(currentFailedIndices)) + } else { + log.Debug("[tx-relay] All producers accepted private tx after retries", "hash", hash) + } +} + +func (mc *multiClient) checkTxStatus(hash common.Hash) (bool, error) { + // Submit tx to all block producers in parallel + var lastErr error + var preconfOfferedCount atomic.Uint64 + var wg sync.WaitGroup + for i, client := range mc.clients { + wg.Add(1) + go func(client *rpc.Client, index int) { + defer wg.Done() + + var txStatus txpool.TxStatus + ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout) + err := client.CallContext(ctx, &txStatus, "txpool_txStatus", hash) + cancel() + if err != nil { + rpcCallsFailureMeter.Mark(1) + lastErr = err + return + } + rpcCallsSuccessMeter.Mark(1) + if txStatus == txpool.TxStatusPending { + preconfOfferedCount.Add(1) + } + }(client, i) + } + wg.Wait() + + // Only offer a preconf if the tx was accepted by all block producers + if preconfOfferedCount.Load() == uint64(len(mc.clients)) { + return true, nil + } + + if lastErr != nil { + rpcErrorInPreconfMeter.Mark(1) + } else { + belowThresholdPreconfMeter.Mark(1) + } + + return false, lastErr +} + +// Close closes all rpc client connections +func (mc *multiClient) close() { + for _, client := range mc.clients { + client.Close() + } +} diff --git a/eth/relay/multiclient_test.go b/eth/relay/multiclient_test.go new file mode 100644 index 0000000000..97676d31ff --- /dev/null +++ b/eth/relay/multiclient_test.go @@ -0,0 +1,1314 @@ +package relay + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +type mockRpcServer struct { + server *httptest.Server + + handleBlockNumber func(w http.ResponseWriter, id int) + handleSendPreconfTx func(w http.ResponseWriter, id int, params json.RawMessage) + handleSendPrivateTx func(w http.ResponseWriter, id int, params json.RawMessage) + handleTxStatus func(w http.ResponseWriter, id int, params json.RawMessage) + sendError func(w http.ResponseWriter, id int, code 
int, message string) +} + +func newMockRpcServer() *mockRpcServer { + m := &mockRpcServer{ + handleBlockNumber: defaultHandleBlockNumber, + handleSendPreconfTx: defaultHandleSendPreconfTx, + handleSendPrivateTx: defaultHandleSendPrivateTx, + handleTxStatus: defaultHandleTxStatus, + sendError: defaultSendError, + } + + mux := http.NewServeMux() + mux.HandleFunc("/", m.handleRequests) + m.server = httptest.NewServer(mux) + + return m +} + +func (m *mockRpcServer) handleRequests(w http.ResponseWriter, r *http.Request) { + var req struct { + ID int `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Handle different RPC methods + switch req.Method { + case "eth_blockNumber": + m.handleBlockNumber(w, req.ID) + case "eth_sendRawTransactionPrivate": + m.handleSendPrivateTx(w, req.ID, req.Params) + case "eth_sendRawTransactionForPreconf": + m.handleSendPreconfTx(w, req.ID, req.Params) + case "txpool_txStatus": + m.handleTxStatus(w, req.ID, req.Params) + default: + m.sendError(w, req.ID, -32601, "method not found") + } +} + +func (m *mockRpcServer) close() { + m.server.Close() +} + +func defaultHandleBlockNumber(w http.ResponseWriter, id int) { + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": "0x1", + } + json.NewEncoder(w).Encode(response) +} + +func defaultHandleSendPreconfTx(w http.ResponseWriter, id int, params json.RawMessage) { + // Extract the raw transaction from params + var rawTxParams []string + json.Unmarshal(params, &rawTxParams) + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(hexutil.MustDecode(rawTxParams[0])); err != nil { + defaultSendError(w, id, -32602, err.Error()) + return + } + + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": map[string]interface{}{ + "hash": common.HexToHash("0x"), + "preconfirmed": true, + }, + } + json.NewEncoder(w).Encode(response) +} + +func handleSendPreconfTxWithRejection(w http.ResponseWriter, id int, params json.RawMessage) { + // Extract the raw transaction from params + var rawTxParams []string + json.Unmarshal(params, &rawTxParams) + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(hexutil.MustDecode(rawTxParams[0])); err != nil { + defaultSendError(w, id, -32602, err.Error()) + return + } + + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": map[string]interface{}{ + "hash": common.HexToHash("0x"), + "preconfirmed": false, + }, + } + json.NewEncoder(w).Encode(response) +} + +func defaultHandleSendPrivateTx(w http.ResponseWriter, id int, params json.RawMessage) { + // Extract the raw transaction from params + var rawTxParams []string + json.Unmarshal(params, &rawTxParams) + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(hexutil.MustDecode(rawTxParams[0])); err != nil { + defaultSendError(w, id, -32602, err.Error()) + return + } + + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": tx.Hash(), + } + json.NewEncoder(w).Encode(response) +} + +func defaultHandleTxStatus(w http.ResponseWriter, id int, params json.RawMessage) { + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": txpool.TxStatusPending, + } + json.NewEncoder(w).Encode(response) +} + +func makeTxStatusHandler(statusMap map[common.Hash]txpool.TxStatus) func(w http.ResponseWriter, id int, params json.RawMessage) { + return func(w 
http.ResponseWriter, id int, params json.RawMessage) { + var inputs []common.Hash + json.Unmarshal(params, &inputs) + hash := inputs[0] + + status := txpool.TxStatusUnknown + if s, ok := statusMap[hash]; ok { + status = s + } + + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "result": status, + } + json.NewEncoder(w).Encode(response) + } +} + +func defaultSendError(w http.ResponseWriter, id int, code int, message string) { + response := map[string]interface{}{ + "jsonrpc": "2.0", + "id": id, + "error": map[string]interface{}{ + "code": code, + "message": message, + }, + } + json.NewEncoder(w).Encode(response) +} + +func TestMockRpc(t *testing.T) { + server := newMockRpcServer() + url := server.server.URL + + client, err := rpc.Dial(url) + if err != nil { + t.Fatalf("err: %v", err) + } + + var blockNumber string + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + err = client.CallContext(ctx, &blockNumber, "eth_blockNumber") + cancel() + if err != nil { + t.Fatalf("err: %v", err) + } + + number, _ := hexutil.DecodeUint64(blockNumber) + require.Equal(t, uint64(1), number, "expected default block number to be 1") +} + +func TestNewMulticlient(t *testing.T) { + t.Parallel() + + // Initialize 4 healthy servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 4) + var urls []string = make([]string, 4) + for i := 0; i < 4; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + + t.Run("initialise multiclient with empty set of urls", func(t *testing.T) { + mc := newMultiClient([]string{}) + require.Nil(t, mc, "expected a nil multiclient") + }) + + t.Run("initialise multiclient with all healthy servers", func(t *testing.T) { + mc := newMultiClient(urls) + require.NotNil(t, mc, "expected non-nil multiclient given healthy urls") + require.Equal(t, len(urls), len(mc.clients), "expected all clients given healthy urls") + mc.close() + }) + + t.Run("initialise multiclient with few healthy servers", func(t *testing.T) { + // Close one of the server to simulate failure + rpcServers[0].close() + mc := newMultiClient(urls) + require.NotNil(t, mc, "expected non-nil multiclient given some healthy urls") + require.Equal(t, 3, len(mc.clients), "expected 2 clients given 2 healthy urls") + mc.close() + }) + + t.Run("initialise multiclient with failing call in rpc server", func(t *testing.T) { + // Mock the `eth_blockNumber` call in one of the servers to send + // an error instead of correct response simulating failure. + rpcServers[1].handleBlockNumber = func(w http.ResponseWriter, id int) { + defaultSendError(w, id, -32601, "internal server error") + } + mc := newMultiClient(urls) + require.NotNil(t, mc, "expected non-nil multiclient given some healthy urls") + require.Equal(t, 2, len(mc.clients), "expected 2 clients given 2 healthy urls") + mc.close() + }) + + t.Run("initialise multiclient with timeout in rpc server", func(t *testing.T) { + // Mock the `eth_blockNumber` call in one of the servers to sleep + // for more than `rpcTimeout` duration simulating failure. 
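+ // The dial-time health check in newMultiClient uses the same rpcTimeout, so this endpoint + // should be skipped during initialisation.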
+ rpcServers[1].handleBlockNumber = func(w http.ResponseWriter, id int) { + time.Sleep(rpcTimeout + 100*time.Millisecond) + defaultHandleBlockNumber(w, id) + } + mc := newMultiClient(urls) + require.NotNil(t, mc, "expected non-nil multiclient given some healthy urls") + require.Equal(t, 2, len(mc.clients), "expected 2 clients given 2 healthy urls") + mc.close() + }) + + t.Run("initialise multiclient with all failed servers", func(t *testing.T) { + rpcServers[1].close() + rpcServers[2].close() + rpcServers[3].close() + mc := newMultiClient(urls) + require.Nil(t, mc, "expected nil multiclient given all failing urls") + }) +} + +func TestSubmitPreconfTx(t *testing.T) { + t.Parallel() + + // Create a dummy tx + tx1 := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + rawTx, err := tx1.MarshalBinary() + require.NoError(t, err, "error in marshalling dummy tx") + + // Initialize 4 healthy servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 4) + var urls []string = make([]string, 4) + for i := 0; i < 4; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + + t.Run("submitPreconfTx with healthy BPs", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.NoError(t, err, "expected no error in submitting preconf tx") + require.True(t, res, "expected preconf to be offered by all BPs") + }) + + t.Run("submitPreconfTx with invalid tx", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + invalidRawTx := []byte{0x01, 0x02, 0x03} + res, err := mc.submitPreconfTx(invalidRawTx) + require.Error(t, err, "expected error in submitting invalid preconf tx") + require.False(t, res, "expected preconf to not be offered for invalid tx") + }) + + t.Run("submitPreconfTx with no preconfirmation", func(t *testing.T) { + // Mock one of the server to reject preconfirmation + rpcServers[0].handleSendPreconfTx = handleSendPreconfTxWithRejection + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.NoError(t, err, "expected no error in submitting preconf tx") + require.False(t, res, "expected preconf to be not offered by all BPs") + }) + + t.Run("submitPreconfTx with error in rpc server", func(t *testing.T) { + // Mock one of the servers to return an error + rpcServers[0].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.Error(t, err, "expected error in submitting preconf tx") + require.ErrorContains(t, err, "internal server error", "expected internal server error") + require.False(t, res, "expected preconf to be not offered by all BPs") + }) + + t.Run("submitPreconfTx with timeout in rpc server", func(t *testing.T) { + // Mock one of the servers to timeout + rpcServers[0].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout + 100*time.Millisecond) + defaultHandleSendPreconfTx(w, id, params) + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.Error(t, err, "expected error in submitting preconf tx") + require.ErrorContains(t, err, "context deadline exceeded", "expected context deadline exceeded error") + require.False(t, res, "expected preconf to be not offered by all BPs") + }) + + t.Run("submitPreconfTx runs in parallel", func(t 
*testing.T) { + // Ensure all calls take almost 2s of time but don't exceed rpcTimeout + for i := range rpcServers { + rpcServers[i].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout - 100*time.Millisecond) + defaultHandleSendPreconfTx(w, id, params) + } + } + + mc := newMultiClient(urls) + defer mc.close() + + start := time.Now() + res, err := mc.submitPreconfTx(rawTx) + elapsed := time.Since(start) + + require.NoError(t, err, "expected no error in submitting preconf tx") + require.True(t, res, "expected preconf to be offered by all BPs") + require.Less(t, elapsed, 2*time.Second, "expected parallel calls to finish below timeout") + require.Greater(t, elapsed, rpcTimeout-100*time.Millisecond, "expected calls to take at least time taken by all calls") + }) + + t.Run("submitPreconfTx with already known error from one BP", func(t *testing.T) { + // Reset all handlers to default + for i := range rpcServers { + rpcServers[i].handleSendPreconfTx = defaultHandleSendPreconfTx + } + + // Mock server 0 to return "already known" error + rpcServers[0].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.NoError(t, err, "expected no error when one BP returns already known") + require.True(t, res, "expected preconf to be offered when all BPs accept (including already known)") + }) + + t.Run("submitPreconfTx with already known error from all BPs", func(t *testing.T) { + // Mock all servers to return "already known" error + for i := range rpcServers { + rpcServers[i].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.NoError(t, err, "expected no error when all BPs return already known") + require.True(t, res, "expected preconf to be offered when all BPs return already known") + }) + + t.Run("submitPreconfTx with already known and different error", func(t *testing.T) { + // Some BPs return already known, one returns a different error, rest succeed + rpcServers[0].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + rpcServers[1].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + rpcServers[2].handleSendPreconfTx = defaultHandleSendPreconfTx + rpcServers[3].handleSendPreconfTx = defaultHandleSendPreconfTx + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.Error(t, err, "expected error when one BP returns an error which apart from already known") + require.ErrorContains(t, err, "internal server error", "expected internal server error") + require.False(t, res, "expected preconf to not be offered when one BP fails with non-already-known error") + }) + + t.Run("submitPreconfTx with already known and rejection", func(t *testing.T) { + // Some BPs return already known, one rejects preconf + rpcServers[0].handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + rpcServers[1].handleSendPreconfTx = handleSendPreconfTxWithRejection + rpcServers[2].handleSendPreconfTx = 
defaultHandleSendPreconfTx + rpcServers[3].handleSendPreconfTx = defaultHandleSendPreconfTx + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.submitPreconfTx(rawTx) + require.NoError(t, err, "expected no error") + require.False(t, res, "expected preconf to not be offered when one BP rejects") + }) + + t.Run("submitPreconfTx with some failing servers", func(t *testing.T) { + // Set handlers back to default + for i := range rpcServers { + rpcServers[i].handleSendPreconfTx = defaultHandleSendPreconfTx + } + + // Initialise multiclient with healthy servers + mc := newMultiClient(urls) + defer mc.close() + + // Close one of the servers to simulate failure + rpcServers[0].close() + + // Ensure all 4 clients are still available + require.Equal(t, len(urls), len(mc.clients), "expected all clients given healthy urls") + + res, err := mc.submitPreconfTx(rawTx) + require.Error(t, err, "expected error in submitting preconf tx") + require.False(t, res, "expected preconf to be not offered by all BPs") + }) + + t.Run("submitPreconfTx with all failing servers", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + // Close all servers to simulate failure + rpcServers[1].close() + rpcServers[2].close() + rpcServers[3].close() + + res, err := mc.submitPreconfTx(rawTx) + require.Error(t, err, "expected error in submitting preconf tx") + require.False(t, res, "expected preconf to be not offered by all BPs") + }) +} + +func TestSubmitPrivateTx(t *testing.T) { + t.Parallel() + + // Create a dummy tx + tx1 := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + rawTx, err := tx1.MarshalBinary() + require.NoError(t, err, "error in marshalling dummy tx") + + // Initialize 4 healthy servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 4) + var urls []string = make([]string, 4) + for i := 0; i < 4; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + + t.Run("submitPrivateTx with all healthy BPs", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.NoError(t, err, "expected no error in submitting private tx to all healthy BPs") + }) + + t.Run("submitPrivateTx with invalid tx", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + invalidRawTx := []byte{0x01, 0x02, 0x03} + err := mc.submitPrivateTx(invalidRawTx, common.Hash{}, false, nil) + require.Error(t, err, "expected error in submitting invalid private tx") + }) + + t.Run("submitPrivateTx with error in one RPC server", func(t *testing.T) { + // Mock one of the servers to return an error + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when one BP fails") + require.ErrorContains(t, err, "internal server error", "expected internal server error") + }) + + t.Run("submitPrivateTx with timeout in one RPC server", func(t *testing.T) { + // Reset server 0 to default first + rpcServers[0].handleSendPrivateTx = defaultHandleSendPrivateTx + + // Mock one server to timeout + rpcServers[1].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout + 100*time.Millisecond) + defaultHandleSendPrivateTx(w, id, params) + } + + mc := newMultiClient(urls) + defer mc.close() + 
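+ // Retry is disabled here (retry=false), so the slow producer's timeout surfaces directly as + // the returned error instead of being retried in the background.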
+ err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when one BP times out") + require.ErrorContains(t, err, "context deadline exceeded", "expected context deadline exceeded error") + }) + + t.Run("submitPrivateTx runs in parallel", func(t *testing.T) { + // Reset all handlers and make each call take almost rpcTimeout + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout - 100*time.Millisecond) + defaultHandleSendPrivateTx(w, id, params) + } + } + + mc := newMultiClient(urls) + defer mc.close() + + start := time.Now() + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + elapsed := time.Since(start) + + require.NoError(t, err, "expected no error in submitting private tx") + require.Less(t, elapsed, 2*time.Second, "expected parallel calls to finish below total timeout") + require.Greater(t, elapsed, rpcTimeout-100*time.Millisecond, "expected calls to take at least the time of one call") + }) + + t.Run("submitPrivateTx with multiple BPs failing", func(t *testing.T) { + // Reset handlers first + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = defaultHandleSendPrivateTx + } + + // Make 2 servers fail + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + rpcServers[1].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32602, "another error") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when multiple BPs fail") + }) + + t.Run("submitPrivateTx with all BPs failing", func(t *testing.T) { + // Make all servers fail + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when all BPs fail") + require.ErrorContains(t, err, "internal server error", "expected error message from failing BPs") + }) + + t.Run("submitPrivateTx with already known error from one BP", func(t *testing.T) { + // Reset all handlers to default + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = defaultHandleSendPrivateTx + } + + // Mock one server to return "already known" error + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.NoError(t, err, "expected no error when one BP returns already known") + }) + + t.Run("submitPrivateTx with already known error from all BPs", func(t *testing.T) { + // Mock all servers to return "already known" error + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.NoError(t, err, "expected no error when all BPs return already known") + }) + + t.Run("submitPrivateTx with already 
known and different error", func(t *testing.T) { + // Some BPs return already known, one returns a different error, rest succeed + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + } + rpcServers[1].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + rpcServers[2].handleSendPrivateTx = defaultHandleSendPrivateTx + rpcServers[3].handleSendPrivateTx = defaultHandleSendPrivateTx + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when one BP returns non-already-known error") + require.ErrorContains(t, err, "internal server error", "expected internal server error") + }) + + t.Run("submitPrivateTx with some BPs failing after initialization", func(t *testing.T) { + // Reset all handlers to default + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = defaultHandleSendPrivateTx + } + + // Initialize multiclient with all healthy servers + mc := newMultiClient(urls) + defer mc.close() + + // Close one server to simulate failure after initialization + rpcServers[0].close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when BP fails after initialization") + }) + + t.Run("submitPrivateTx with all BPs failing after initialization", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + // Close all remaining servers + rpcServers[1].close() + rpcServers[2].close() + rpcServers[3].close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), false, nil) + require.Error(t, err, "expected error when all BPs fail") + }) +} + +func TestCheckTxStatus(t *testing.T) { + t.Parallel() + + // Create a dummy tx + tx1 := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + + // Initialize 4 healthy servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 4) + var urls []string = make([]string, 4) + for i := 0; i < 4; i++ { + rpcServers[i] = newMockRpcServer() + // Mock all servers to return pending status for tx1 + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + }) + urls[i] = rpcServers[i].server.URL + } + + t.Run("checkTxStatus with all BPs having tx as pending", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.NoError(t, err, "expected no error in checking tx status") + require.True(t, res, "expected result to be true as status is pending in all BPs") + }) + + t.Run("checkTxStatus with unknown tx hash", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(common.HexToHash("0x1")) + require.NoError(t, err, "expected no error in checking tx status") + require.False(t, res, "expected result to be false as status is unknown") + }) + + t.Run("checkTxStatus with mixed statuses across BPs", func(t *testing.T) { + // Some BPs have pending, some have unknown + rpcServers[0].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + }) + rpcServers[1].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + }) + rpcServers[2].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + rpcServers[3].handleTxStatus = 
makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.NoError(t, err, "expected no error in checking tx status") + require.False(t, res, "expected result to be false as not all BPs have tx in pending/included state") + }) + + t.Run("checkTxStatus with all BPs returning unknown status", func(t *testing.T) { + // All servers return unknown status + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.NoError(t, err, "expected no error in checking tx status") + require.False(t, res, "expected result to be false as all BPs return unknown status") + }) + + t.Run("checkTxStatus with tx queued in all BPs", func(t *testing.T) { + // All servers return queued status (should not count as valid) + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusQueued, + }) + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.NoError(t, err, "expected no error in checking tx status") + require.False(t, res, "expected result to be false as queued is not accepted") + }) + + t.Run("checkTxStatus with error in one RPC server", func(t *testing.T) { + // Reset to all pending + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + }) + } + + // One server returns error + rpcServers[0].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.Error(t, err, "expected error in checking tx status") + require.ErrorContains(t, err, "internal server error", "expected internal server error") + require.False(t, res, "expected result to be false due to error") + }) + + t.Run("checkTxStatus with timeout in one RPC server", func(t *testing.T) { + // One server times out + rpcServers[0].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout + 100*time.Millisecond) + makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + })(w, id, params) + } + + mc := newMultiClient(urls) + defer mc.close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.Error(t, err, "expected error due to timeout") + require.ErrorContains(t, err, "context deadline exceeded", "expected context deadline exceeded error") + require.False(t, res, "expected result to be false due to timeout") + }) + + t.Run("checkTxStatus runs in parallel", func(t *testing.T) { + // All calls take almost rpcTimeout but don't exceed it + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(rpcTimeout - 100*time.Millisecond) + makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + })(w, id, params) + } + } + + mc := newMultiClient(urls) + defer mc.close() + + start := time.Now() + res, err := mc.checkTxStatus(tx1.Hash()) + elapsed := time.Since(start) + + require.NoError(t, err, "expected no error in checking tx status") + require.True(t, res, "expected result to be true") + 
require.Less(t, elapsed, 2*time.Second, "expected parallel calls to finish below total timeout") + require.Greater(t, elapsed, rpcTimeout-100*time.Millisecond, "expected calls to take at least the time of one call") + }) + + t.Run("checkTxStatus with some failing servers after initialization", func(t *testing.T) { + // Reset all handlers to default + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx1.Hash(): txpool.TxStatusPending, + }) + } + + // Initialize multiclient with all healthy servers + mc := newMultiClient(urls) + defer mc.close() + + // Close one server to simulate failure after initialization + rpcServers[0].close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.Error(t, err, "expected error due to failed server") + require.False(t, res, "expected result to be false due to failed server") + }) + + t.Run("checkTxStatus with all failing servers", func(t *testing.T) { + mc := newMultiClient(urls) + defer mc.close() + + // Close all remaining servers + rpcServers[1].close() + rpcServers[2].close() + rpcServers[3].close() + + res, err := mc.checkTxStatus(tx1.Hash()) + require.Error(t, err, "expected error with all servers failing") + require.False(t, res, "expected result to be false with all servers failing") + }) +} + +func TestPrivateTxSubmissionRetry(t *testing.T) { + t.Parallel() + + // Create a dummy tx + tx1 := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + rawTx, err := tx1.MarshalBinary() + require.NoError(t, err, "error in marshalling dummy tx") + + // Create 4 servers that succeed on first attempt + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 4) + var urls []string = make([]string, 4) + for i := 0; i < 4; i++ { + rpcServers[i] = newMockRpcServer() + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + urls[i] = rpcServers[i].server.URL + } + + t.Run("retry succeeds after N attempts", func(t *testing.T) { + // Track call counts for servers that will fail initially + var callCounts [4]atomic.Int32 + + // Servers 0 and 1 fail twice, then succeed + for i := 0; i < 2; i++ { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[i].Add(1) + if count <= 2 { + // Fail first 2 attempts + defaultSendError(w, id, -32601, "internal server error") + } else { + // Succeed on 3rd attempt + defaultHandleSendPrivateTx(w, id, params) + } + } + } + + // Servers 2 and 3 always succeed. Just track call counts. 
+ for i := 2; i < 4; i++ { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for retries to complete (2 retries * 2s interval + buffer) + time.Sleep(2*privateTxRetryInterval + 100*time.Millisecond) + + // Verify that failing servers were called multiple times including initial submission + require.Equal(t, int32(3), callCounts[0].Load(), "expected server 0 to be called 3 times") + require.Equal(t, int32(3), callCounts[1].Load(), "expected server 1 to be called 3 times") + + // Verify that healthy servers were only called once during initial submission + require.Equal(t, int32(1), callCounts[2].Load(), "expected server 2 to be called once") + require.Equal(t, int32(1), callCounts[3].Load(), "expected server 3 to be called once") + }) + + t.Run("retry stops when tx found in local database", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 fails always + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[0].Add(1) + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + // Set up txGetter that will return the transaction as found (simulating it got included) + txGetter := func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + if hash == tx1.Hash() { + return true, tx1, common.Hash{}, 0, 0 + } + return false, nil, common.Hash{}, 0, 0 + } + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, txGetter) + require.Error(t, err, "expected error on initial submission") + + // Wait for one retry attempt + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Since tx is found in local database, retry should stop early + // Server 0 should be called only once during initial submission (no retries) + require.Equal(t, int32(1), callCounts[0].Load(), "expected server 0 to be called only once, no retries after tx found") + // All other servers should be called only once during initial submission + for i := 1; i < 4; i++ { + require.Equal(t, int32(1), callCounts[i].Load(), "expected server %d to be called only once during initial submission", i) + } + }) + + t.Run("retry until max retries reached", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + rpcServers[i].handleTxStatus = defaultHandleTxStatus + } + + // Server 0 always fails + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[0].Add(1) + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for all retries to complete (5 retries * 2s 
interval + buffer) + time.Sleep(5*privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be called 6 times (1 for initial submission and 5 retries) + require.Equal(t, int32(6), callCounts[0].Load(), "expected server 0 to be called 6 times (1 initial + 5 retries)") + // All other servers should be called exactly once during initial submission + for i := 1; i < 4; i++ { + require.Equal(t, int32(1), callCounts[i].Load(), "expected server %d to be called only once during initial submission", i) + } + }) + + t.Run("retry with mixed success and failure", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 fails once, then succeeds + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[0].Add(1) + if count == 1 { + defaultSendError(w, id, -32601, "temporary failure") + } else { + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 1 always fails + rpcServers[1].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[1].Add(1) + defaultSendError(w, id, -32602, "permanent failure") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for all retries to complete (5 retries * 2s interval + buffer) + time.Sleep(5*privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be called twice (initial + 1 retry that succeeds) + require.Equal(t, int32(2), callCounts[0].Load(), "expected server 0 to succeed on second attempt") + + // Server 1 should be called multiple times (keeps failing) + require.Equal(t, int32(6), callCounts[1].Load(), "expected server 1 to be retried multiple times") + + // Other servers should be called only once during initial submission + require.Equal(t, int32(1), callCounts[2].Load(), "expected server 2 to be called only once during initial submission") + require.Equal(t, int32(1), callCounts[3].Load(), "expected server 3 to be called only once during initial submission") + }) + + t.Run("retry with all BPs eventually succeeding", func(t *testing.T) { + var callCounts [4]atomic.Int32 + + // All servers fail once, then succeed + for i := 0; i < 4; i++ { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[i].Add(1) + if count == 1 { + defaultSendError(w, id, -32601, "temporary failure") + } else { + defaultHandleSendPrivateTx(w, id, params) + } + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission when all BPs fail") + + // Wait for retry to complete + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // All servers should be called exactly twice (initial + 1 successful retry) + for i := 0; i < 4; i++ { + require.Equal(t, int32(2), callCounts[i].Load(), "expected server %d to be called exactly twice", i) + } + }) + + t.Run("retry handles timeout in failed BP", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id 
int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 times out on first call, succeeds on retry + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[0].Add(1) + if count == 1 { + time.Sleep(rpcTimeout + 100*time.Millisecond) + } + defaultHandleSendPrivateTx(w, id, params) + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected timeout error on initial submission") + require.ErrorContains(t, err, "context deadline exceeded", "expected timeout error") + + // Wait for retry + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be retried and succeed + require.Equal(t, int32(2), callCounts[0].Load(), "expected server 0 to be retried after timeout") + for i := 1; i < 4; i++ { + require.Equal(t, int32(1), callCounts[i].Load(), "expected server %d to be called only once", i) + } + }) + + t.Run("retry receives already known error on first retry", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + rpcServers[i].handleTxStatus = defaultHandleTxStatus + } + + // Server 0 fails first, then returns already known on retry + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[0].Add(1) + if count == 1 { + defaultSendError(w, id, -32601, "internal server error") + } else { + // On retry, return already known + defaultSendError(w, id, -32000, "already known") + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for one retry attempt + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be called twice (initial + 1 retry with already known) + require.Equal(t, int32(2), callCounts[0].Load(), "expected server 0 to be called twice") + + // No further retries should happen after already known + time.Sleep(privateTxRetryInterval) + require.Equal(t, int32(2), callCounts[0].Load(), "expected no further retries after already known") + }) + + t.Run("retry with already known error from multiple BPs", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Servers 0 and 1 fail initially, then return already known on retry + for i := 0; i < 2; i++ { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[i].Add(1) + if count == 1 { + defaultSendError(w, id, -32601, "temporary failure") + } else { + // On retry, return already known + defaultSendError(w, id, -32000, "already known") + } + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for retry + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Servers 0 and 1 should be called 
twice (initial + retry) + require.Equal(t, int32(2), callCounts[0].Load(), "expected server 0 to be called twice") + require.Equal(t, int32(2), callCounts[1].Load(), "expected server 1 to be called twice") + // Other servers should be called only once + require.Equal(t, int32(1), callCounts[2].Load(), "expected server 2 to be called once") + require.Equal(t, int32(1), callCounts[3].Load(), "expected server 3 to be called once") + }) + + t.Run("retry with already known on initial submission", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 returns already known on initial submission + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[0].Add(1) + defaultSendError(w, id, -32000, "already known") + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.NoError(t, err, "expected no error when all submissions succeed or return already known") + + // Wait to ensure no retries happen + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be called only once (no retry needed as already known treated as success) + require.Equal(t, int32(1), callCounts[0].Load(), "expected server 0 to be called only once") + }) + + t.Run("retry with mix of already known and successful retries", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 fails, then returns already known + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[0].Add(1) + if count == 1 { + defaultSendError(w, id, -32601, "temporary failure") + } else { + defaultSendError(w, id, -32000, "already known") + } + } + + // Server 1 fails, then succeeds normally + rpcServers[1].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + count := callCounts[1].Add(1) + if count == 1 { + defaultSendError(w, id, -32602, "temporary failure") + } else { + defaultHandleSendPrivateTx(w, id, params) + } + } + + mc := newMultiClient(urls) + defer mc.close() + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, nil) + require.Error(t, err, "expected error on initial submission") + + // Wait for retry + time.Sleep(privateTxRetryInterval + 100*time.Millisecond) + + // Both servers 0 and 1 should be called twice + require.Equal(t, int32(2), callCounts[0].Load(), "expected server 0 to be called twice") + require.Equal(t, int32(2), callCounts[1].Load(), "expected server 1 to be called twice") + // No further retries should happen + time.Sleep(privateTxRetryInterval) + require.Equal(t, int32(2), callCounts[0].Load(), "expected no further retries") + require.Equal(t, int32(2), callCounts[1].Load(), "expected no further retries") + }) + + t.Run("retry with txGetter not finding tx continues retrying", func(t *testing.T) { + // Reset handlers to default first + var callCounts [4]atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleSendPrivateTx = func(w http.ResponseWriter, id int, params 
json.RawMessage) { + callCounts[i].Add(1) + defaultHandleSendPrivateTx(w, id, params) + } + } + + // Server 0 always fails + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCounts[0].Add(1) + defaultSendError(w, id, -32601, "internal server error") + } + + mc := newMultiClient(urls) + defer mc.close() + + // Set up txGetter that doesn't find the transaction + var txGetterCallCount atomic.Int32 + txGetter := func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + txGetterCallCount.Add(1) + return false, nil, common.Hash{}, 0, 0 + } + + err := mc.submitPrivateTx(rawTx, tx1.Hash(), true, txGetter) + require.Error(t, err, "expected error on initial submission") + + // Wait for all retries to complete + time.Sleep(5*privateTxRetryInterval + 100*time.Millisecond) + + // Server 0 should be called 6 times (1 initial + 5 retries) + require.Equal(t, int32(6), callCounts[0].Load(), "expected server 0 to be called 6 times") + // TxGetter should be called 5 times (once per retry attempt) + require.Equal(t, int32(5), txGetterCallCount.Load(), "expected txGetter to be called 5 times during retries") + }) +} diff --git a/eth/relay/private_tx_store.go b/eth/relay/private_tx_store.go new file mode 100644 index 0000000000..7d8fa8e4ae --- /dev/null +++ b/eth/relay/private_tx_store.go @@ -0,0 +1,148 @@ +package relay + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +var totalPrivateTxsMeter = metrics.NewRegisteredMeter("privatetxs/count", nil) + +type PrivateTxGetter interface { + IsTxPrivate(hash common.Hash) bool +} + +type PrivateTxSetter interface { + Add(hash common.Hash) + Purge(hash common.Hash) +} + +type PrivateTxStore struct { + txs map[common.Hash]time.Time // tx hash to last updated time + mu sync.RWMutex + + chainEventSubFn func(ch chan<- core.ChainEvent) event.Subscription + + // metrics + txsAdded atomic.Uint64 + txsPurged atomic.Uint64 // deleted by an explicit call + txsDeleted atomic.Uint64 // deleted because tx got included + + closeCh chan struct{} +} + +func NewPrivateTxStore() *PrivateTxStore { + store := &PrivateTxStore{ + txs: make(map[common.Hash]time.Time), + closeCh: make(chan struct{}), + } + go store.report() + return store +} + +func (s *PrivateTxStore) Add(hash common.Hash) { + s.mu.Lock() + defer s.mu.Unlock() + + s.txs[hash] = time.Now() + s.txsAdded.Add(1) +} + +func (s *PrivateTxStore) Purge(hash common.Hash) { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.txs, hash) + s.txsPurged.Add(1) +} + +func (s *PrivateTxStore) IsTxPrivate(hash common.Hash) bool { + s.mu.RLock() + defer s.mu.RUnlock() + + if _, ok := s.txs[hash]; ok { + return true + } + + return false +} + +func (s *PrivateTxStore) cleanupLoop() { + for { + if err := s.cleanup(); err != nil { + log.Debug("Error cleaning up private tx store, restarting", "err", err) + time.Sleep(time.Second) + } else { + break + } + } +} + +func (s *PrivateTxStore) cleanup() error { + if s.chainEventSubFn == nil { + return fmt.Errorf("private tx store: chain event subscription not set") + } + + var chainEventCh = make(chan core.ChainEvent) + chainEventSub := s.chainEventSubFn(chainEventCh) + + for { + select { + case event := <-chainEventCh: + s.mu.Lock() + deleted := uint64(0) + for _, tx := range event.Transactions { + if _, exists 
:= s.txs[tx.Hash()]; exists { + deleted++ + delete(s.txs, tx.Hash()) + } + } + s.txsDeleted.Add(deleted) + s.mu.Unlock() + case err := <-chainEventSub.Err(): + return err + case <-s.closeCh: + chainEventSub.Unsubscribe() + return nil + } + } +} + +func (s *PrivateTxStore) SetchainEventSubFn(fn func(ch chan<- core.ChainEvent) event.Subscription) { + if fn != nil && s.chainEventSubFn == nil { + s.chainEventSubFn = fn + go s.cleanupLoop() + } +} + +func (s *PrivateTxStore) report() { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.mu.RLock() + storeSize := len(s.txs) + s.mu.RUnlock() + totalPrivateTxsMeter.Mark(int64(storeSize)) + log.Info("[private-tx-store] stats", "len", storeSize, "added", s.txsAdded.Load(), "purged", s.txsPurged.Load(), "deleted", s.txsDeleted.Load()) + s.txsAdded.Store(0) + s.txsPurged.Store(0) + s.txsDeleted.Store(0) + case <-s.closeCh: + return + } + } +} + +func (s *PrivateTxStore) Close() { + close(s.closeCh) +} diff --git a/eth/relay/private_tx_store_test.go b/eth/relay/private_tx_store_test.go new file mode 100644 index 0000000000..d45c7243c7 --- /dev/null +++ b/eth/relay/private_tx_store_test.go @@ -0,0 +1,206 @@ +package relay + +import ( + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/stretchr/testify/require" +) + +// TestNewPrivateTxStore tests store initialization +func TestNewPrivateTxStore(t *testing.T) { + t.Parallel() + + store := NewPrivateTxStore() + if store == nil { + t.Fatal("expected non-nil store") + } + defer store.Close() + + require.Empty(t, store.txs, "expected store to be empty initially") +} + +// TestPrivateTxStoreOperations tests basic operations of the store +// like adding, purging and reading transactions. 
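A minimal, self-contained sketch of how the store introduced above is meant to be wired, using only the relay package API shown in this patch; the event.Feed is an assumption standing in for a real chain-event subscription (e.g. the blockchain's SubscribeChainEvent), much like the mock generator used in the cleanup test further below.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/relay"
	"github.com/ethereum/go-ethereum/event"
)

func main() {
	store := relay.NewPrivateTxStore()
	defer store.Close()

	// Wire the background cleanup loop to a chain-event source. An event.Feed
	// keeps the sketch runnable on its own; a node would pass a blockchain
	// subscription here instead.
	var feed event.Feed
	store.SetchainEventSubFn(func(ch chan<- core.ChainEvent) event.Subscription {
		return feed.Subscribe(ch)
	})
	time.Sleep(50 * time.Millisecond) // give the cleanup goroutine time to subscribe

	// Record a transaction as private when it is accepted through the private path.
	tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil)
	store.Add(tx.Hash())
	fmt.Println("private:", store.IsTxPrivate(tx.Hash())) // true

	// Once the transaction shows up in a chain event, the store drops it on its own.
	feed.Send(core.ChainEvent{Transactions: types.Transactions{tx}})
	time.Sleep(100 * time.Millisecond)
	fmt.Println("private:", store.IsTxPrivate(tx.Hash())) // false
}

The operations test that follows exercises the same Add / IsTxPrivate / Purge calls directly.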
+func TestPrivateTxStoreOperations(t *testing.T) { + t.Parallel() + + store := NewPrivateTxStore() + defer store.Close() + + // Ensure store is empty initially + require.Empty(t, store.txs, "expected store to be empty initially") + + // Add a few transactions + hash1 := common.HexToHash("0x1") + store.Add(hash1) + require.Len(t, store.txs, 1, "expected store to have 1 transaction after Add") + hash2 := common.HexToHash("0x2") + store.Add(hash2) + require.Len(t, store.txs, 2, "expected store to have 2 transactions after Add") + hash3 := common.HexToHash("0x3") + store.Add(hash3) + require.Len(t, store.txs, 3, "expected store to have 3 transactions after Add") + + // Ensure metrics are correctly reported + require.Equal(t, uint64(3), store.txsAdded.Load(), "expected txsAdded metric to be 3") + require.Equal(t, uint64(0), store.txsPurged.Load(), "expected txsPurged metric to be 0") + + // Query all transactions + require.True(t, store.IsTxPrivate(hash1), "expected hash1 to be private") + require.True(t, store.IsTxPrivate(hash2), "expected hash2 to be private") + require.True(t, store.IsTxPrivate(hash3), "expected hash3 to be private") + unknownHash := common.HexToHash("0x4") + require.False(t, store.IsTxPrivate(unknownHash), "expected unknownHash not to be private") + + // Purge + store.Purge(hash2) + require.Len(t, store.txs, 2, "expected store to have 2 transactions after Purge") + require.False(t, store.IsTxPrivate(hash2), "expected hash2 not to be private after Purge") + require.Equal(t, uint64(1), store.txsPurged.Load(), "expected txsPurged metric to be 1") + + // Purging same hash should not panic + store.Purge(hash2) + require.Len(t, store.txs, 2, "expected store to still have 2 transactions after purging non-existent hash") +} + +// TestPrivateTxStoreConcurrentOperations tests concurrent operations on the store +func TestPrivateTxStoreConcurrentOperations(t *testing.T) { + t.Parallel() + + store := NewPrivateTxStore() + defer store.Close() + + var wg sync.WaitGroup + numGoroutines := 100 + numOpsPerGoroutine := 100 + + // Concurrent adds, checks, and purges + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for j := 0; j < numOpsPerGoroutine; j++ { + hash := common.BigToHash(big.NewInt(int64(id*numOpsPerGoroutine + j))) + + store.Add(hash) + require.Equal(t, true, store.IsTxPrivate(hash), "expected hash to be private after Add") + if j%2 == 0 { + store.Purge(hash) + require.Equal(t, false, store.IsTxPrivate(hash), "expected hash not to be private after Purge") + } + } + }(i) + } + + wg.Wait() + // Should not panic or race + + require.Equal(t, numGoroutines*numOpsPerGoroutine/2, len(store.txs), "expected total count of private txs to match") +} + +// mockChainEventGenerator simulates chain events for testing +type mockChainEventGenerator struct { + ch chan core.ChainEvent + sub *mockSubscription +} + +type mockSubscription struct { + errCh chan error +} + +func (s *mockSubscription) Unsubscribe() { + close(s.errCh) +} + +func (s *mockSubscription) Err() <-chan error { + return s.errCh +} + +func newMockChainEventGenerator() *mockChainEventGenerator { + return &mockChainEventGenerator{ + ch: make(chan core.ChainEvent, 10), + sub: &mockSubscription{ + errCh: make(chan error), + }, + } +} + +func (m *mockChainEventGenerator) subscribe(ch chan<- core.ChainEvent) event.Subscription { + // Forward events from our internal channel to the subscriber + go func() { + for event := range m.ch { + ch <- event + } + }() + return m.sub +} + +func (m 
*mockChainEventGenerator) sendEvent(event core.ChainEvent) { + m.ch <- event +} + +func (m *mockChainEventGenerator) close() { + close(m.ch) +} + +// TestPrivateTxStoreCleanup tests automatic cleanup of store on +// receiving new chain events via subscription. +func TestPrivateTxStoreCleanup(t *testing.T) { + t.Parallel() + + store := NewPrivateTxStore() + defer store.Close() + + // Check that there's no subscription initially + require.Nil(t, store.chainEventSubFn, "chainEventSubFn should be nil initially") + + // Explicitly start cleanup process without setting the chain event subscription function + err := store.cleanup() + require.Error(t, err, "expected error when doing cleanup without chain event subscription function") + + // Create mock chain event generator + mockGen := newMockChainEventGenerator() + defer mockGen.close() + + // Set the chain event subscription function to start cleanup routine + // in background. + store.SetchainEventSubFn(mockGen.subscribe) + + // Create some mock transactions + tx1 := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + tx2 := types.NewTransaction(2, common.Address{}, nil, 0, nil, nil) + tx3 := types.NewTransaction(3, common.Address{}, nil, 0, nil, nil) + store.Add(tx1.Hash()) + store.Add(tx2.Hash()) + store.Add(tx3.Hash()) + + require.Equal(t, true, store.IsTxPrivate(tx1.Hash()), "expected tx1 to be in store") + require.Equal(t, true, store.IsTxPrivate(tx2.Hash()), "expected tx2 to be in store") + require.Equal(t, true, store.IsTxPrivate(tx3.Hash()), "expected tx3 to be in store") + + // Create a chain event including some transactions + tx4 := types.NewTransaction(4, common.Address{}, nil, 0, nil, nil) + mockGen.sendEvent(core.ChainEvent{ + Transactions: types.Transactions{tx2, tx4}, + }) + + // Give the cleanup goroutine time to process + time.Sleep(100 * time.Millisecond) + + // Confirm that tx2 is removed from the store + require.Equal(t, false, store.IsTxPrivate(tx2.Hash()), "expected tx2 to be removed from store") + + // Confirm that tx1 and tx3 are still present + require.Equal(t, true, store.IsTxPrivate(tx1.Hash()), "expected tx1 to still be in store") + require.Equal(t, true, store.IsTxPrivate(tx3.Hash()), "expected tx3 to still be in store") + + // Ensure metrics are correctly reported + require.Equal(t, uint64(1), store.txsDeleted.Load(), "expected txsDeleted metric to be 1") +} diff --git a/eth/relay/relay.go b/eth/relay/relay.go new file mode 100644 index 0000000000..017ddf7067 --- /dev/null +++ b/eth/relay/relay.go @@ -0,0 +1,149 @@ +package relay + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + errRelayNotConfigured = errors.New("relay service not configured") +) + +type Config struct { + // for relay + enablePreconf bool + enablePrivateTx bool + + // for block producers + acceptPreconfTx bool + acceptPrivateTx bool +} + +// RelayService handles all preconf and private transaction related services +type RelayService struct { + config Config + privateTxStore *PrivateTxStore + txRelay *Service +} + +func Init(enablePreconf, enablePrivateTx, acceptPreconfTx, acceptPrivateTx bool, blockProducerURLs []string) *RelayService { + config := Config{ + enablePreconf: enablePreconf, + enablePrivateTx: enablePrivateTx, + acceptPreconfTx: acceptPreconfTx, + acceptPrivateTx: acceptPrivateTx, + } + var privateTxStore *PrivateTxStore + if acceptPrivateTx { + 
privateTxStore = NewPrivateTxStore() + } + var txRelay *Service + if enablePreconf || enablePrivateTx { + txRelay = NewService(blockProducerURLs, nil) + } + return &RelayService{ + config: config, + privateTxStore: privateTxStore, + txRelay: txRelay, + } +} + +func (s *RelayService) RecordPrivateTx(hash common.Hash) { + if s.privateTxStore != nil { + s.privateTxStore.Add(hash) + } +} + +func (s *RelayService) PurgePrivateTx(hash common.Hash) { + if s.privateTxStore != nil { + s.privateTxStore.Purge(hash) + } +} + +func (s *RelayService) GetPrivateTxGetter() PrivateTxGetter { + var getter PrivateTxGetter + if s.privateTxStore != nil { + getter = s.privateTxStore + } + return getter +} + +func (s *RelayService) SetchainEventSubFn(fn func(ch chan<- core.ChainEvent) event.Subscription) { + if s.privateTxStore != nil { + s.privateTxStore.SetchainEventSubFn(fn) + } +} + +func (s *RelayService) SetTxGetter(getter TxGetter) { + if s.txRelay != nil { + s.txRelay.SetTxGetter(getter) + } +} + +func (s *RelayService) PreconfEnabled() bool { + return s.config.enablePreconf +} + +func (s *RelayService) PrivateTxEnabled() bool { + return s.config.enablePrivateTx +} + +func (s *RelayService) AcceptPreconfTxs() bool { + return s.config.acceptPreconfTx +} + +func (s *RelayService) AcceptPrivateTxs() bool { + return s.config.acceptPrivateTx +} + +// SubmitPreconfTransaction submits a transaction for preconfirmation to block producers +func (s *RelayService) SubmitPreconfTransaction(tx *types.Transaction) error { + if s.txRelay == nil { + return fmt.Errorf("request dropped: %w", errRelayNotConfigured) + } + err := s.txRelay.SubmitTransactionForPreconf(tx) + if err != nil { + return fmt.Errorf("request dropped: %w", err) + } + return nil +} + +// SubmitPrivateTransaction submits a private transaction to block producers +func (s *RelayService) SubmitPrivateTransaction(tx *types.Transaction) error { + if s.txRelay == nil { + return fmt.Errorf("request dropped: %w", errRelayNotConfigured) + } + err := s.txRelay.SubmitPrivateTx(tx, true) + if err != nil { + // Don't add extra context to this error as it will be floated back to user + return err + } + return nil +} + +// CheckPreconfStatus checks the preconfirmation status of a transaction +func (s *RelayService) CheckPreconfStatus(hash common.Hash) (bool, error) { + if s.txRelay == nil { + return false, fmt.Errorf("request dropped: %w", errRelayNotConfigured) + } + preconf, err := s.txRelay.CheckTxPreconfStatus(hash) + if err != nil { + return false, fmt.Errorf("unable to offer preconf: %w", err) + } + return preconf, nil +} + +// Close closes the relay service and all its components +func (s *RelayService) Close() { + if s.txRelay != nil { + s.txRelay.close() + } + if s.privateTxStore != nil { + s.privateTxStore.Close() + } +} diff --git a/eth/relay/service.go b/eth/relay/service.go new file mode 100644 index 0000000000..346ba8e72b --- /dev/null +++ b/eth/relay/service.go @@ -0,0 +1,320 @@ +package relay + +import ( + "errors" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +var ( + errRpcClientUnavailable = errors.New("rpc client unavailable to submit transactions") + errQueueOverflow = errors.New("relay task queue overflow") + errPreconfValidationFailed = errors.New("failed to validate transaction inclusion status for issuing preconf") + errPrivateTxSubmissionFailed = errors.New("private tx submission failed 
partially, background retry scheduled") +) + +var ( + preconfSubmitTimer = metrics.NewRegisteredTimer("preconfs/submit", nil) + checkTxStatusTimer = metrics.NewRegisteredTimer("preconfs/checkstatus", nil) + privateTxSubmitTimer = metrics.NewRegisteredTimer("privatetx/submit", nil) + + uniquePreconfsTaskMeter = metrics.NewRegisteredMeter("preconfs/tasks", nil) + validPreconfsMeter = metrics.NewRegisteredMeter("preconfs/valid", nil) + invalidPreconfsMeter = metrics.NewRegisteredMeter("preconfs/invalid", nil) + invalidToValidPreconfsMeter = metrics.NewRegisteredMeter("preconfs/invalidtovalid", nil) + txInDbMeter = metrics.NewRegisteredMeter("preconfs/txindb", nil) + + uniquePrivateTxRequestMeter = metrics.NewRegisteredMeter("privatetx/request", nil) + privateTxSubmissionSuccessMeter = metrics.NewRegisteredMeter("privatetx/success", nil) + privateTxSubmissionFailureMeter = metrics.NewRegisteredMeter("privatetx/failure", nil) +) + +// TxGetter defines a function that retrieves a transaction by its hash from local database. +// Returns: found (bool), transaction, blockHash, blockNumber, txIndex +type TxGetter func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) + +type ServiceConfig struct { + expiryTickerInterval time.Duration + expiryInterval time.Duration + maxQueuedTasks int + maxConcurrentTasks int +} + +var DefaultServiceConfig = ServiceConfig{ + expiryTickerInterval: time.Minute, + expiryInterval: 10 * time.Minute, + maxQueuedTasks: 40_000, + maxConcurrentTasks: 1024, +} + +// TxTask represents a transaction submission task +type TxTask struct { + rawtx []byte + hash common.Hash + insertedAt time.Time + + preconfirmed bool // whether block producer preconfirmed the tx or not + err error +} + +type Service struct { + config *ServiceConfig + multiclient *multiClient + store map[common.Hash]TxTask + storeMu sync.RWMutex + taskCh chan TxTask // channel to queue new tasks + semaphore chan struct{} + closeCh chan struct{} // to limit concurrent tasks + + txGetter TxGetter // function to get transaction from local database +} + +func NewService(urls []string, config *ServiceConfig) *Service { + if config == nil { + defaultConfig := DefaultServiceConfig + config = &defaultConfig + } + s := &Service{ + config: config, + multiclient: newMultiClient(urls), + store: make(map[common.Hash]TxTask), + taskCh: make(chan TxTask, config.maxQueuedTasks), + semaphore: make(chan struct{}, config.maxConcurrentTasks), + closeCh: make(chan struct{}), + } + go s.processPreconfTasks() + go s.cleanup() + return s +} + +// SetTxGetter sets the transaction getter function for querying local database +func (s *Service) SetTxGetter(getter TxGetter) { + s.txGetter = getter +} + +// SubmitTransactionForPreconf attempts to queue a transaction submission task for preconf +// and returns true if the task is successfully queued. It fails if either the rpc clients +// are unavailable or if the task queue is full. 
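SubmitTransactionForPreconf, defined next, combines the bounded taskCh queue with the semaphore channel created in NewService: submission never blocks the RPC caller, and processing is capped at maxConcurrentTasks goroutines. A stripped-down, self-contained sketch of that enqueue/worker pattern follows; the names here are illustrative, not the actual Service fields.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errQueueOverflow = errors.New("task queue overflow")

type worker struct {
	tasks chan int      // bounded queue, plays the role of Service.taskCh
	sem   chan struct{} // concurrency cap, plays the role of Service.semaphore
}

func newWorker(maxQueued, maxConcurrent int) *worker {
	w := &worker{
		tasks: make(chan int, maxQueued),
		sem:   make(chan struct{}, maxConcurrent),
	}
	go w.loop()
	return w
}

// submit never blocks the caller: it either queues the task or reports overflow.
func (w *worker) submit(task int) error {
	select {
	case w.tasks <- task:
		return nil
	default:
		return errQueueOverflow
	}
}

func (w *worker) loop() {
	for task := range w.tasks {
		w.sem <- struct{}{} // acquire a slot; blocks once maxConcurrent tasks are in flight
		go func(task int) {
			defer func() { <-w.sem }()        // release the slot when done
			time.Sleep(10 * time.Millisecond) // stand-in for the RPC fan-out
			fmt.Println("processed task", task)
		}(task)
	}
}

func main() {
	w := newWorker(4, 2)
	for i := 0; i < 8; i++ {
		if err := w.submit(i); err != nil {
			fmt.Println("dropped task", i, err)
		}
	}
	time.Sleep(200 * time.Millisecond) // let queued tasks drain in this sketch
}

The overflow branch is what surfaces errQueueOverflow to callers, and the semaphore is what the "queue overflow" and "max concurrent tasks" tests later in this patch rely on.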
+func (s *Service) SubmitTransactionForPreconf(tx *types.Transaction) error { + if s.multiclient == nil { + log.Warn("[tx-relay] No rpc client available to submit transactions") + return errRpcClientUnavailable + } + + rawTx, err := tx.MarshalBinary() + if err != nil { + log.Warn("[tx-relay] Failed to marshal transaction", "hash", tx.Hash(), "err", err) + return err + } + + // First check if service is closed/closing + select { + case <-s.closeCh: + log.Info("[tx-relay] Dropping task, service closing", "hash", tx.Hash()) + return errRpcClientUnavailable + default: + } + + // Queue for processing (non-blocking until queue is full) + select { + case s.taskCh <- TxTask{rawtx: rawTx, hash: tx.Hash()}: + return nil + default: + log.Info("[tx-relay] Task queue full, dropping transaction", "hash", tx.Hash()) + return errQueueOverflow + } +} + +// processPreconfTasks continuously picks new tasks from the queue and +// processes them. It rate limits the number of parallel tasks. +func (s *Service) processPreconfTasks() { + for { + select { + case task := <-s.taskCh: + // Acquire semaphore to limit concurrent submissions + s.semaphore <- struct{}{} + go func(task TxTask) { + defer func() { <-s.semaphore }() + s.processPreconfTask(task) + }(task) + case <-s.closeCh: + return + } + } +} + +// processPreconfTask submits the preconf transaction from the task to the block +// producers via multiclient and updates the status in cache. +func (s *Service) processPreconfTask(task TxTask) { + // Capture some metrics + uniquePreconfsTaskMeter.Mark(1) + start := time.Now() + res, err := s.multiclient.submitPreconfTx(task.rawtx) + preconfSubmitTimer.UpdateSince(start) + // It's possible that the calls succeeded but preconf was not offered in which + // case err would be nil. Update with a generic error as preconf wasn't offered. + if !res && err == nil { + err = errPreconfValidationFailed + } + if err != nil { + log.Warn("[tx-relay] failed to submit preconf tx", "err", err) + } + task.preconfirmed = res + task.err = err + // Note: We can purge the raw tx here to save memory. Keeping it + // incase we have some changes in the retry logic. + + s.updateTaskInCache(task) +} + +// updateTaskInCache safely updates or inserts a task in cache by acting as a +// common gateway. A race condition can happen when the process task function +// and check preconf status function try to update the same task concurrently. +// It also ensures that a preconf status once marked is never reverted and +// latest error is preserved. Returns the latest preconf status and error. +func (s *Service) updateTaskInCache(newTask TxTask) (bool, error) { + s.storeMu.Lock() + defer s.storeMu.Unlock() + + existingTask, exists := s.store[newTask.hash] + if !exists { + // Task doesn't exist, create it and update the cache + newTask.insertedAt = time.Now() + if newTask.preconfirmed { + validPreconfsMeter.Mark(1) + } else { + invalidPreconfsMeter.Mark(1) + } + s.store[newTask.hash] = newTask + return newTask.preconfirmed, newTask.err + } + + // If a task already exists and is preconfirmed, skip doing any updates. It + // is possible that first write tries to set preconfirmation status but second + // write contains an error thus making status false. We don't want to revert + // the status in that case. 
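// Example interleaving of the race described above: processPreconfTask records
// {preconfirmed: true} while a concurrent CheckTxPreconfStatus records
// {preconfirmed: false, err: <rpc error>} for the same hash. If the confirmed
// write lands first, the later failed write takes the early return below and
// cannot downgrade it; if it lands second, it simply upgrades the entry. Either
// ordering leaves the hash marked as preconfirmed.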
+ if existingTask.preconfirmed { + return existingTask.preconfirmed, existingTask.err + } + + invalidToValidPreconfsMeter.Mark(1) + existingTask.preconfirmed = newTask.preconfirmed + existingTask.err = newTask.err + s.store[newTask.hash] = existingTask + return existingTask.preconfirmed, existingTask.err +} + +// CheckTxPreconfStatus checks whether a given transaction hash has been preconfirmed +// or not. It checks things in following order: +// - Checks the availability of preconf status of the task in cache +// - Checks locally if the transaction is already included in a block +// - Queries all block producers via multiclient to get the preconf status +func (s *Service) CheckTxPreconfStatus(hash common.Hash) (bool, error) { + s.storeMu.RLock() + task, exists := s.store[hash] + s.storeMu.RUnlock() + + // If task exists in cache and is already preconfirmed, return immediately + if exists && task.preconfirmed { + return true, nil + } + + // If task is not in cache or not preconfirmed, check locally if the tx + // was included in a block or not. + if s.txGetter != nil { + found, tx, _, _, _ := s.txGetter(hash) + if found && tx != nil { + txInDbMeter.Mark(1) + s.updateTaskInCache(TxTask{hash: hash, preconfirmed: true, err: nil}) + log.Debug("[tx-relay] Transaction found in local database", "hash", hash) + return true, nil + } + } + + if s.multiclient == nil { + return false, errRpcClientUnavailable + } + + // If tx not found locally, query block producers for status + start := time.Now() + res, err := s.multiclient.checkTxStatus(hash) + checkTxStatusTimer.UpdateSince(start) + // It's possible that the calls succeeded but preconf was not offered in which + // case err would be nil. Update with a generic error as preconf wasn't offered. + if !res && err == nil { + err = errPreconfValidationFailed + } + + // Update the task in cache and return the latest status + res, err = s.updateTaskInCache(TxTask{hash: hash, preconfirmed: res, err: err}) + if err != nil { + log.Info("[tx-relay] Unable to validate tx status for preconf", "err", task.err) + } + return res, err +} + +// SubmitPrivateTx attempts to submit a private transaction to all block producers +func (s *Service) SubmitPrivateTx(tx *types.Transaction, retry bool) error { + if s.multiclient == nil { + log.Warn("[tx-relay] No rpc client available to submit transactions") + return errRpcClientUnavailable + } + + rawTx, err := tx.MarshalBinary() + if err != nil { + log.Warn("[tx-relay] Failed to marshal transaction", "hash", tx.Hash(), "err", err) + return err + } + + uniquePrivateTxRequestMeter.Mark(1) + start := time.Now() + err = s.multiclient.submitPrivateTx(rawTx, tx.Hash(), retry, s.txGetter) + privateTxSubmitTimer.UpdateSince(start) + if err != nil { + privateTxSubmissionFailureMeter.Mark(1) + log.Warn("[tx-relay] Error submitting private tx to atleast one block producer", "hash", tx.Hash(), "err", err) + return errPrivateTxSubmissionFailed + } + + privateTxSubmissionSuccessMeter.Mark(1) + return nil +} + +// cleanup is a periodic routine to delete old preconf results +func (s *Service) cleanup() { + ticker := time.NewTicker(s.config.expiryTickerInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + count := 0 + s.storeMu.Lock() + now := time.Now() + for hash, task := range s.store { + if now.Sub(task.insertedAt) > s.config.expiryInterval { + delete(s.store, hash) + count++ + } + } + s.storeMu.Unlock() + if count > 0 { + log.Info("[tx-relay] Purged expired tasks", "count", count) + } + case <-s.closeCh: + return + } + 
} +} + +func (s *Service) close() { + close(s.closeCh) + if s.multiclient != nil { + s.multiclient.close() + } +} diff --git a/eth/relay/service_test.go b/eth/relay/service_test.go new file mode 100644 index 0000000000..885750ecf3 --- /dev/null +++ b/eth/relay/service_test.go @@ -0,0 +1,1168 @@ +package relay + +import ( + "encoding/json" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +func TestNewService(t *testing.T) { + t.Parallel() + + t.Run("service initializes with valid URLs", func(t *testing.T) { + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + defaultConfig := DefaultServiceConfig + service := NewService(urls, nil) + require.NotNil(t, service, "expected non-nil service") + require.NotNil(t, service.multiclient, "expected non-nil multiclient") + require.NotNil(t, service.store, "expected non-nil store") + require.NotNil(t, service.taskCh, "expected non-nil task channel") + require.Equal(t, defaultConfig.maxQueuedTasks, cap(service.taskCh), "expected task channel capacity to match maxQueuedTasks") + require.Equal(t, defaultConfig.maxConcurrentTasks, cap(service.semaphore), "expected semaphore capacity to match maxConcurrentTasks") + + service.close() + }) + + t.Run("service initializes with nil multiclient when no URLs", func(t *testing.T) { + service := NewService([]string{}, nil) + require.NotNil(t, service, "expected non-nil service") + require.Nil(t, service.multiclient, "expected nil multiclient with empty URLs") + + service.close() + }) +} + +func TestSubmitTransactionForPreconf(t *testing.T) { + t.Parallel() + + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + t.Run("error when multiclient is nil", func(t *testing.T) { + service := NewService([]string{}, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.ErrorIs(t, err, errRpcClientUnavailable, "expected errRpcClientUnavailable error on nil multiclient") + }) + + t.Run("queue valid tx for preconf", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Check task was stored + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.True(t, task.preconfirmed, "expected task to be preconfirmed") + }) + + t.Run("queue invalid tx for preconf", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Mock the server to send no preconf but accept tx submission + rpcServers[0].handleSendPreconfTx = 
handleSendPreconfTxWithRejection + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Check task was stored + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.False(t, task.preconfirmed, "expected task to not be preconfirmed") + require.ErrorIs(t, task.err, errPreconfValidationFailed, "expected preconf validation failed error") + }) + + t.Run("queue overflow with burst submissions", func(t *testing.T) { + // Update the config to a reasonable size for testing + config := DefaultServiceConfig + config.maxQueuedTasks = 10 + config.maxConcurrentTasks = 5 + + service := NewService(urls, &config) + defer service.close() + + // Block the semaphore so that tasks are queued entirely + for i := 0; i < config.maxConcurrentTasks; i++ { + service.semaphore <- struct{}{} + } + + // Fill the queue to full capacity. We need to do config.maxQueuedTasks+1 because + // the first task will be consumed. + for i := 0; i <= config.maxQueuedTasks; i++ { + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error for task %d", i) + if i == 0 { + // Wait for a very small delay to allow first task to be consumed + time.Sleep(20 * time.Millisecond) + } + } + + // Next submission should fail due to overflow + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.Error(t, err, "expected error when queue is full") + require.Equal(t, errQueueOverflow, err, "expected errQueueOverflow") + }) + + t.Run("max concurrent tasks", func(t *testing.T) { + // Update the config to a reasonable size for testing + config := DefaultServiceConfig + config.maxQueuedTasks = 10 + config.maxConcurrentTasks = 5 + + // Update the rpc server handlers to have a delay in processing tasks + for _, s := range rpcServers { + s.handleSendPreconfTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + time.Sleep(time.Second) + defaultHandleSendPreconfTx(w, id, params) + } + } + + service := NewService(urls, &config) + defer service.close() + + // Start sending `maxConcurrentTasks`+1 tasks to saturate the workers + for i := 0; i <= config.maxConcurrentTasks; i++ { + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error for task %d", i) + } + + // While these tasks are being processed, send one more task. + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task within capacity") + + // Check that queue size is 1 (as it should only contain the last task) after a small delay + time.Sleep(100 * time.Millisecond) + queueSize := len(service.taskCh) + require.Equal(t, 1, queueSize, "expected only 1 task in queue") + + // Check again after a small delay + time.Sleep(500 * time.Millisecond) + queueSize = len(service.taskCh) + require.Equal(t, 1, queueSize, "expected only 1 task in queue") + + // Check again after a small delay. By now, at least one of the tasks + // would have been processed.
+ time.Sleep(500 * time.Millisecond) + queueSize = len(service.taskCh) + require.Equal(t, 0, queueSize, "expected no tasks in queue") + + // Reset all rpc servers + for _, s := range rpcServers { + s.handleSendPreconfTx = defaultHandleSendPreconfTx + } + }) + + t.Run("error when service is closing", func(t *testing.T) { + service := NewService(urls, nil) + + // Close service first + service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.Error(t, err, "expected error when service is closing") + require.Equal(t, errRpcClientUnavailable, err, "expected errRpcClientUnavailable") + }) + + t.Run("concurrent preconf task submissions", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + var wg sync.WaitGroup + numTasks := 2_000 + successCount := atomic.Int32{} + + var nonce atomic.Uint64 + + // Launch goroutines in batches to avoid overwhelming the system + batchSize := 100 + for batch := 0; batch < numTasks/batchSize; batch++ { + for i := 0; i < batchSize; i++ { + wg.Add(1) + idx := batch*batchSize + i + go func(taskIdx int) { + defer wg.Done() + + tx := types.NewTransaction(nonce.Add(1), common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + if err == nil { + successCount.Add(1) + } + }(idx) + } + } + + wg.Wait() + require.Equal(t, int32(numTasks), successCount.Load(), "expected all tasks to be queued without any errors") + + // Wait for all tasks to be processed + time.Sleep(3 * time.Second) + + // Verify tasks were processed + service.storeMu.RLock() + storeSize := len(service.store) + require.Equal(t, numTasks, storeSize, "expected store size to be same as number of tasks") + for hash, task := range service.store { + require.NoError(t, task.err, "expected no error in task %s", hash.Hex()) + require.True(t, task.preconfirmed, "expected task %s to be preconfirmed", hash.Hex()) + } + service.storeMu.RUnlock() + }) +} + +func TestServiceSubmitPrivateTx(t *testing.T) { + t.Parallel() + + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + t.Run("error when multiclient is nil", func(t *testing.T) { + service := NewService([]string{}, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitPrivateTx(tx, false) + require.ErrorIs(t, err, errRpcClientUnavailable, "expected errRpcClientUnavailable error on nil multiclient") + }) + + t.Run("submit valid private tx", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitPrivateTx(tx, false) + require.NoError(t, err, "expected no error submitting private tx") + }) + + t.Run("error when submission fails", func(t *testing.T) { + // Mock server to fail private tx submissions + rpcServers[0].handleSendPrivateTx = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32601, "internal server error") + } + + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitPrivateTx(tx, false) + require.Equal(t, errPrivateTxSubmissionFailed, err, "expected 
errPrivateTxSubmissionFailed") + + // Reset handler + rpcServers[0].handleSendPrivateTx = defaultHandleSendPrivateTx + }) + + t.Run("concurrent private tx submissions", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + var wg sync.WaitGroup + numTxs := 50 + successCount := atomic.Int32{} + + for i := 0; i < numTxs; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + tx := types.NewTransaction(uint64(idx), common.Address{}, nil, 0, nil, nil) + err := service.SubmitPrivateTx(tx, false) + if err == nil { + successCount.Add(1) + } + }(i) + } + + wg.Wait() + require.Equal(t, int32(numTxs), successCount.Load(), "expected all private txs to be submitted successfully") + }) +} + +func TestCheckTxPreconfStatus(t *testing.T) { + t.Parallel() + + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + t.Run("respond task preconfirmation result from cache", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Submit and wait for processing + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err) + time.Sleep(100 * time.Millisecond) + + // Check preconfirmation status + preconfirmed, err := service.CheckTxPreconfStatus(tx.Hash()) + require.NoError(t, err, "expected no error when checking preconf status") + require.True(t, preconfirmed, "expected preconfirmation to be true") + }) + + // Case when task is not available in cache and we do the status check by hash + // against block producers and it passes. 
+ t.Run("check tx status when task not available in cache", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Track call count to ensure checkTxStatus is called + var callCount [2]atomic.Int32 + for i, server := range rpcServers { + server.handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCount[i].Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + // Confirm that unknown tx is not present in cache + unknownHash := common.HexToHash("0x1") + service.storeMu.RLock() + _, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.False(t, exists, "expected task to not exist in cache") + + // Check preconfirmation status + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error when checking preconf status for unknown tx") + require.True(t, preconfirmed, "expected preconfirmation to be true for unknown tx") + + // Ensure that checkTxStatus was called on all rpc servers + for i := range rpcServers { + require.Equal(t, int32(1), callCount[i].Load(), "expected checkTxStatus to be called once on rpc server %d", i) + } + + // Ensure that cache is updated + service.storeMu.RLock() + task, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.True(t, task.preconfirmed, "expected task to be preconfirmed in cache") + + // Check preconfirmation status again to verify cache hit + preconfirmed, err = service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error when checking preconf status for unknown tx") + require.True(t, preconfirmed, "expected preconfirmation to be true for unknown tx") + + // Ensure checkTxStatus wasn't called again + for i := range rpcServers { + require.Equal(t, int32(1), callCount[i].Load(), "expected checkTxStatus to be called once on rpc server %d", i) + } + }) + + // Case when task is not available in cache and we do the status check by hash + // against block producers. The call passes but returns false suggesting tx is + // not preconfirmed. 
+ t.Run("tx status returns no preconfirmation when task not available in cache", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Track call count to ensure checkTxStatus is called and it returns no preconf + var callCount [2]atomic.Int32 + handleTxStatus := makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + for i, server := range rpcServers { + server.handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCount[i].Add(1) + handleTxStatus(w, id, params) + } + } + + // Confirm that unknown tx is not present in cache + unknownHash := common.HexToHash("0x1") + service.storeMu.RLock() + _, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.False(t, exists, "expected task to not exist in cache") + + // Check preconfirmation status + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.Error(t, err, "expected error when checking preconf status for unknown tx") + require.ErrorIs(t, err, errPreconfValidationFailed, "expected errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmation to be false for unknown tx") + + // Ensure that checkTxStatus was called on all rpc servers + for i := range rpcServers { + require.Equal(t, int32(1), callCount[i].Load(), "expected checkTxStatus to be called once on rpc server %d", i) + } + + // Ensure that cache is updated + service.storeMu.RLock() + task, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.False(t, task.preconfirmed, "expected task to be not preconfirmed in cache") + require.ErrorIs(t, task.err, errPreconfValidationFailed, "expected task error to be errPreconfValidationFailed") + + // Check preconfirmation status again to ensure tx status is re-checked + preconfirmed, err = service.CheckTxPreconfStatus(unknownHash) + require.Error(t, err, "expected error when checking preconf status for unknown tx") + require.ErrorIs(t, err, errPreconfValidationFailed, "expected errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmation to be false for unknown tx") + + // Ensure checkTxStatus was called again + for i := range rpcServers { + require.Equal(t, int32(2), callCount[i].Load(), "expected checkTxStatus to be called twice on rpc server %d", i) + } + }) + + // Case when task is not available in cache and we do the status check by hash + // against block producers. The call fails suggesting tx is not preconfirmed. + t.Run("tx status check fails when task not available in cache", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Track call count to ensure checkTxStatus is called and the call fails. 
+ var callCount [2]atomic.Int32 + for i, server := range rpcServers { + server.handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCount[i].Add(1) + defaultSendError(w, id, -32603, "internal server error") + } + } + + // Confirm that unknown tx is not present in cache + unknownHash := common.HexToHash("0x1") + service.storeMu.RLock() + _, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.False(t, exists, "expected task to not exist in cache") + + // Check preconfirmation status + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.Error(t, err, "expected error when checking preconf status for unknown tx") + require.NotErrorIs(t, err, errPreconfValidationFailed, "expected an error other than errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmation to be false for unknown tx") + + // Ensure that checkTxStatus was called on all rpc servers + for i := range rpcServers { + require.Equal(t, int32(1), callCount[i].Load(), "expected checkTxStatus to be called once on rpc server %d", i) + } + + // Ensure that cache is updated + service.storeMu.RLock() + task, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.False(t, task.preconfirmed, "expected task to be not preconfirmed in cache") + require.NotErrorIs(t, task.err, errPreconfValidationFailed, "expected an error other than errPreconfValidationFailed") + + // Check preconfirmation status again to ensure tx status is re-checked + preconfirmed, err = service.CheckTxPreconfStatus(unknownHash) + require.Error(t, err, "expected error when checking preconf status for unknown tx") + require.NotErrorIs(t, err, errPreconfValidationFailed, "expected an error other than errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmation to be false for unknown tx") + + // Ensure checkTxStatus was called again + for i := range rpcServers { + require.Equal(t, int32(2), callCount[i].Load(), "expected checkTxStatus to be called twice on rpc server %d", i) + } + }) + + // Case when task is not available in cache and we do the status check by hash + // against block producers. The call fails initially but later passes second time. + t.Run("tx status check fails first and then passes when task not available in cache", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Track call count to ensure checkTxStatus is called and the call fails. 
+ var callCount [2]atomic.Int32 + for i, server := range rpcServers { + server.handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCount[i].Add(1) + defaultSendError(w, id, -32603, "internal server error") + } + } + + // Confirm that unknown tx is not present in cache + unknownHash := common.HexToHash("0x1") + service.storeMu.RLock() + _, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.False(t, exists, "expected task to not exist in cache") + + // Check preconfirmation status + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.Error(t, err, "expected error when checking preconf status for unknown tx") + require.NotErrorIs(t, err, errPreconfValidationFailed, "expected an error other than errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmation to be false for unknown tx") + + // Ensure that checkTxStatus was called on all rpc servers + for i := range rpcServers { + require.Equal(t, int32(1), callCount[i].Load(), "expected checkTxStatus to be called once on rpc server %d", i) + } + + // Ensure that cache is updated + service.storeMu.RLock() + task, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.False(t, task.preconfirmed, "expected task to be not preconfirmed in cache") + require.NotErrorIs(t, task.err, errPreconfValidationFailed, "expected an error other than errPreconfValidationFailed") + + // Update the handler to return preconfirmed status + for i := range rpcServers { + handleTxStatus := makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + unknownHash: txpool.TxStatusPending, + }) + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + callCount[i].Add(1) + handleTxStatus(w, id, params) + } + } + + // Check preconfirmation status again to ensure tx status is re-checked + preconfirmed, err = service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error when checking preconf status for unknown tx") + require.True(t, preconfirmed, "expected preconfirmation to be true for unknown tx") + + // Ensure checkTxStatus was called again + for i := range rpcServers { + require.Equal(t, int32(2), callCount[i].Load(), "expected checkTxStatus to be called twice on rpc server %d", i) + } + + // Ensure that cache is updated to preconfirmed + service.storeMu.RLock() + task, exists = service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.True(t, task.preconfirmed, "expected task to be preconfirmed in cache") + + // Ensure checkTxStatus wasn't called again to verify cache hit + for i := range rpcServers { + require.Equal(t, int32(2), callCount[i].Load(), "expected checkTxStatus to be called twice on rpc server %d", i) + } + }) + + t.Run("re-checks status when not preconfirmed initially", func(t *testing.T) { + // Mock servers to reject preconf initially + rpcServers[0].handleSendPreconfTx = handleSendPreconfTxWithRejection + + service := NewService(urls, nil) + defer service.close() + + // Submit and wait for processing + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err) + time.Sleep(200 * time.Millisecond) + + // Ensure that the preconfirmation task is stored as not preconfirmed + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() 
+ require.True(t, exists, "expected task to be stored") + require.False(t, task.preconfirmed, "expected task to be not preconfirmed") + + // Mock servers to return unknown tx status on initial status + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{}) + } + + // Check status - should re-check via checkTxStatus + preconfirmed, err := service.CheckTxPreconfStatus(tx.Hash()) + require.Equal(t, errPreconfValidationFailed, err, "expected errPreconfValidationFailed") + require.False(t, preconfirmed, "expected preconfirmed to be false after re-check") + + // Now update the mock servers to return pending status + for i := range rpcServers { + rpcServers[i].handleTxStatus = makeTxStatusHandler(map[common.Hash]txpool.TxStatus{ + tx.Hash(): txpool.TxStatusPending, + }) + } + + // Check status - should again re-check via checkTxStatus + preconfirmed, err = service.CheckTxPreconfStatus(tx.Hash()) + require.NoError(t, err, "expected no error on re-check with pending status") + require.True(t, preconfirmed, "expected preconfirmed to be true after re-check") + + // Reset handlers + for i := range rpcServers { + rpcServers[i].handleTxStatus = defaultHandleTxStatus + rpcServers[i].handleSendPreconfTx = defaultHandleSendPreconfTx + } + }) + + t.Run("tx found in local database via txGetter", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Track if checkTxStatus is called (it shouldn't be) + var checkTxStatusCalled atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + checkTxStatusCalled.Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + // Create a transaction that will be "found" in local database + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + txHash := tx.Hash() + + // Set up mock txGetter that returns the transaction + service.SetTxGetter(func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + if hash == txHash { + return true, tx, common.Hash{}, 0, 0 + } + return false, nil, common.Hash{}, 0, 0 + }) + + // Check preconfirmation status - should find in local DB + preconfirmed, err := service.CheckTxPreconfStatus(txHash) + require.NoError(t, err, "expected no error when tx found in local database") + require.True(t, preconfirmed, "expected preconfirmation to be true when tx found in local database") + + // Verify checkTxStatus was not called + require.Equal(t, int32(0), checkTxStatusCalled.Load(), "expected checkTxStatus to not be called when tx found in local database") + + // Verify cache was updated + service.storeMu.RLock() + task, exists := service.store[txHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.True(t, task.preconfirmed, "expected task to be preconfirmed in cache") + require.NoError(t, task.err, "expected no error as tx was found in local database") + + // Check again - should hit cache and not call txGetter or checkTxStatus + preconfirmed, err = service.CheckTxPreconfStatus(txHash) + require.NoError(t, err, "expected no error on second check") + require.True(t, preconfirmed, "expected preconfirmation to be true on second check") + require.Equal(t, int32(0), checkTxStatusCalled.Load(), "expected checkTxStatus to still not be called") + }) + + t.Run("tx not found in local database falls back to checkTxStatus", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + 
+ // Track if checkTxStatus is called + var checkTxStatusCalled atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + checkTxStatusCalled.Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + unknownHash := common.HexToHash("0x1") + + // Set up mock txGetter that doesn't find the transaction + var txGetterCalled atomic.Int32 + service.SetTxGetter(func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + txGetterCalled.Add(1) + return false, nil, common.Hash{}, 0, 0 + }) + + // Check preconfirmation status - should fall back to checkTxStatus + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error when falling back to checkTxStatus") + require.True(t, preconfirmed, "expected preconfirmation to be true from checkTxStatus") + + // Verify txGetter was called + require.Equal(t, int32(1), txGetterCalled.Load(), "expected txGetter to be called once") + + // Verify checkTxStatus was called as fallback + require.Equal(t, int32(2), checkTxStatusCalled.Load(), "expected checkTxStatus to be called on both servers") + + // Verify cache was updated + service.storeMu.RLock() + task, exists := service.store[unknownHash] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored in cache") + require.True(t, task.preconfirmed, "expected task to be preconfirmed in cache") + }) + + t.Run("txGetter not set falls back to checkTxStatus", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Don't set txGetter - it should fall back to checkTxStatus + + // Track if checkTxStatus is called + var checkTxStatusCalled atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + checkTxStatusCalled.Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + unknownHash := common.HexToHash("0x1") + + // Check preconfirmation status - should go straight to checkTxStatus + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error when txGetter not set") + require.True(t, preconfirmed, "expected preconfirmation to be true from checkTxStatus") + + // Verify checkTxStatus was called + require.Equal(t, int32(2), checkTxStatusCalled.Load(), "expected checkTxStatus to be called on both servers") + }) + + t.Run("tx found in local database updates cache for non-preconfirmed task", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Submit tx that gets rejected + rpcServers[0].handleSendPreconfTx = handleSendPreconfTxWithRejection + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err) + time.Sleep(100 * time.Millisecond) + + // Verify task exists but not preconfirmed + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored") + require.False(t, task.preconfirmed, "expected task to be not preconfirmed initially") + + // Set up txGetter that finds the transaction (simulating it got included) + service.SetTxGetter(func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + if hash == tx.Hash() { + return true, tx, common.Hash{}, 0, 0 + } + return false, nil, common.Hash{}, 0, 0 + }) + + // Track if checkTxStatus is called (it shouldn't be) + var 
checkTxStatusCalled atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + checkTxStatusCalled.Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + // Check status - should find in local DB and update cache + preconfirmed, err := service.CheckTxPreconfStatus(tx.Hash()) + require.NoError(t, err, "expected no error when tx found in local database") + require.True(t, preconfirmed, "expected preconfirmation to be true when tx found in local database") + + // Verify checkTxStatus was not called + require.Equal(t, int32(0), checkTxStatusCalled.Load(), "expected checkTxStatus to not be called") + + // Verify cache was updated to preconfirmed + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still be stored") + require.True(t, task.preconfirmed, "expected task to be preconfirmed after update") + require.NoError(t, task.err, "expected task error to be nil after update") + + // Reset handler + rpcServers[0].handleSendPreconfTx = defaultHandleSendPreconfTx + }) + + t.Run("txGetter returns error still falls back to checkTxStatus", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + unknownHash := common.HexToHash("0xabcd") + + // Set up txGetter that returns false (not found) + var txGetterCalled atomic.Int32 + service.SetTxGetter(func(hash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) { + txGetterCalled.Add(1) + // Return false indicating not found + return false, nil, common.Hash{}, 0, 0 + }) + + // Track if checkTxStatus is called + var checkTxStatusCalled atomic.Int32 + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + checkTxStatusCalled.Add(1) + defaultHandleTxStatus(w, id, params) + } + } + + // Check status - should try txGetter then fall back to checkTxStatus + preconfirmed, err := service.CheckTxPreconfStatus(unknownHash) + require.NoError(t, err, "expected no error") + require.True(t, preconfirmed, "expected preconfirmation from checkTxStatus") + + // Verify both were called + require.Equal(t, int32(1), txGetterCalled.Load(), "expected txGetter to be called") + require.Equal(t, int32(2), checkTxStatusCalled.Load(), "expected checkTxStatus to be called as fallback") + }) +} + +// TestTaskCacheOverride tests scenarios where the task cache is being updated +// by multiple services - one being the main process task and other being the +// check preconf status. 
+func TestTaskCacheOverride(t *testing.T) { + t.Parallel() + + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + t.Run("updateTaskInCache handles writing tasks to cache as expected", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + task := TxTask{ + hash: tx.Hash(), + preconfirmed: false, + err: errPreconfValidationFailed, + insertedAt: time.Now(), + } + + service.updateTaskInCache(task) + + // Check if the cache was updated + service.storeMu.RLock() + cachedTask, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to exist in cache") + require.Equal(t, task.preconfirmed, cachedTask.preconfirmed, "expected preconfirmed status to match") + require.Equal(t, task.err, cachedTask.err, "expected error to match") + + // Update the error and try to write the task again + task.err = errRelayNotConfigured + service.updateTaskInCache(task) + + // Check if the cache was updated with new error + service.storeMu.RLock() + cachedTask, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to exist in cache") + require.Equal(t, task.preconfirmed, cachedTask.preconfirmed, "expected preconfirmed status to match") + require.Equal(t, task.err, cachedTask.err, "expected error to be updated in cache") + + // Update preconfirmed to true and error to nil + task.preconfirmed = true + task.err = nil + service.updateTaskInCache(task) + + // Check if the cache was updated with new values + service.storeMu.RLock() + cachedTask, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to exist in cache") + require.Equal(t, task.preconfirmed, cachedTask.preconfirmed, "expected preconfirmed status to be updated in cache") + require.Equal(t, task.err, cachedTask.err, "expected error to be updated in cache") + + // Try to change the preconf status which should fail + task.preconfirmed = false + task.err = errPreconfValidationFailed + service.updateTaskInCache(task) + + // Check that the cache still has preconfirmed=true and err=nil + service.storeMu.RLock() + cachedTask, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to exist in cache") + require.True(t, cachedTask.preconfirmed, "expected preconfirmed status to remain true in cache") + require.NoError(t, cachedTask.err, "expected error to remain nil in cache") + }) + + t.Run("processPreconfTask succeeds and CheckTxPreconfStatus try to update same task", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Ensure task was stored correctly + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.True(t, task.preconfirmed, "expected task to be preconfirmed") + require.NoError(t, task.err, "expected no error in task") + + // 
Now, simulate a scenario where CheckTxPreconfStatus tries to update the same task + // with a non-preconfirmed status. It won't be possible in reality as the task + // will be available in cache. + invalidTask := TxTask{ + hash: tx.Hash(), + preconfirmed: false, + err: errPreconfValidationFailed, + insertedAt: time.Now(), + } + service.updateTaskInCache(invalidTask) + + // Verify that the original preconfirmed task remains unchanged + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still exist in cache") + require.True(t, task.preconfirmed, "expected preconfirmed status to remain true") + require.NoError(t, task.err, "expected error to remain nil") + }) + + t.Run("processPreconfTask fails and CheckTxPreconfStatus try to update same task", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + // Reject the preconf tx to simulate failure + rpcServers[0].handleSendPreconfTx = handleSendPreconfTxWithRejection + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Ensure task was stored correctly + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.False(t, task.preconfirmed, "expected task to not be preconfirmed") + require.ErrorIs(t, task.err, errPreconfValidationFailed, "expected errPreconfValidationFailed in task") + + // Now, CheckTxPreconfStatus tries to update the same task + res, err := service.CheckTxPreconfStatus(tx.Hash()) + require.Equal(t, true, res, "expected valid preconf to be returned") + require.NoError(t, err, "expected no error from CheckTxPreconfStatus") + + // Ensure the underlying task was updated to preconfirmed + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still exist in cache") + require.True(t, task.preconfirmed, "expected preconfirmed status to be updated to true") + require.NoError(t, task.err, "expected error to be updated to nil") + + // Re-run `SubmitTransactionForPreconf` to force a write with invalid preconf + // status. It should not override the existing status. 
+ err = service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task again") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Verify that the preconfirmed task remains unchanged + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still exist in cache") + require.True(t, task.preconfirmed, "expected preconfirmed status to remain true") + require.NoError(t, task.err, "expected error to remain nil") + + // Reset handler + rpcServers[0].handleSendPreconfTx = defaultHandleSendPreconfTx + }) + + t.Run("CheckTxPreconfStatus succeeds and processPreconfTask try to update same task", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + + // Check the preconf status directly + res, err := service.CheckTxPreconfStatus(tx.Hash()) + require.Equal(t, true, res, "expected valid preconf to be returned") + require.NoError(t, err, "expected no error from CheckTxPreconfStatus") + + // Ensure task was stored correctly in cache + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.True(t, task.preconfirmed, "expected task to be preconfirmed") + require.NoError(t, task.err, "expected no error in task") + + // Reject the preconf tx to simulate failure + rpcServers[0].handleSendPreconfTx = handleSendPreconfTxWithRejection + + // Now, simulate a scenario where processPreconfTask tries to update the same task + // with a non-preconfirmed status. + err = service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Verify that the original preconfirmed task remains unchanged + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still exist in cache") + require.True(t, task.preconfirmed, "expected preconfirmed status to remain true") + require.NoError(t, task.err, "expected error to remain nil") + + // Reset handler + rpcServers[0].handleSendPreconfTx = defaultHandleSendPreconfTx + }) + + t.Run("CheckTxPreconfStatus fails and processPreconfTask try to update same task", func(t *testing.T) { + service := NewService(urls, nil) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + + // Mock the rpc server to reject the tx status call + for i := range rpcServers { + rpcServers[i].handleTxStatus = func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32603, "internal server error") + } + } + + // Check the preconf status directly + res, err := service.CheckTxPreconfStatus(tx.Hash()) + require.Equal(t, false, res, "expected an invalid preconf to be returned") + require.Error(t, err, "expected an error from CheckTxPreconfStatus") + + // Ensure task was stored correctly in cache + service.storeMu.RLock() + task, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + require.False(t, task.preconfirmed, "expected preconfirmed to be false") + require.Error(t, task.err, "expected an error in task") + + // Now, simulate a scenario where processPreconfTask tries to update the same task + // with a 
preconfirmed status. + err = service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Verify that the underlying task is now updated + service.storeMu.RLock() + task, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to still exist in cache") + require.True(t, task.preconfirmed, "expected preconfirmed status to be true") + require.NoError(t, task.err, "expected no error for the task") + + // Ensure that preconf status will now return valid result from cache + res, err = service.CheckTxPreconfStatus(tx.Hash()) + require.Equal(t, true, res, "expected valid preconf to be returned from cache") + require.NoError(t, err, "expected no error from CheckTxPreconfStatus") + }) +} + +func TestTaskCleanup(t *testing.T) { + t.Parallel() + + // Create mock servers + var rpcServers []*mockRpcServer = make([]*mockRpcServer, 2) + var urls []string = make([]string, 2) + for i := 0; i < 2; i++ { + rpcServers[i] = newMockRpcServer() + urls[i] = rpcServers[i].server.URL + } + defer func() { + for _, s := range rpcServers { + s.close() + } + }() + + // Use a short expiry interval for testing + config := DefaultServiceConfig + config.expiryTickerInterval = 200 * time.Millisecond + config.expiryInterval = time.Second + + service := NewService(urls, &config) + defer service.close() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + err := service.SubmitTransactionForPreconf(tx) + require.NoError(t, err, "expected no error queuing task") + + // Give some time to process + time.Sleep(100 * time.Millisecond) + + // Check task was stored + service.storeMu.RLock() + _, exists := service.store[tx.Hash()] + service.storeMu.RUnlock() + require.True(t, exists, "expected task to be stored after processing") + + // Wait for longer than expiry interval to allow cleanup to run + time.Sleep(time.Second + 200*time.Millisecond) + + // Check task was deleted + service.storeMu.RLock() + _, exists = service.store[tx.Hash()] + service.storeMu.RUnlock() + require.False(t, exists, "expected task to be deleted after expiry interval") +} diff --git a/eth/sync.go b/eth/sync.go index 141c543e0e..9870ebe928 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -39,6 +39,9 @@ func (h *handler) syncTransactions(p *eth.Peer) { var hashes []common.Hash for _, batch := range h.txpool.Pending(txpool.PendingFilter{BlobTxs: false}, nil) { for _, tx := range batch { + if h.privateTxGetter != nil && h.privateTxGetter.IsTxPrivate(tx.Hash) { + continue + } hashes = append(hashes, tx.Hash) } } diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 082edcb2c8..eec465f4f3 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -164,6 +164,9 @@ type Config struct { // HealthConfig has health check related settings Health *HealthConfig `hcl:"health,block" toml:"health,block"` + + // Relay has transaction relay related settings + Relay *RelayConfig `hcl:"relay,block" toml:"relay,block"` } type HistoryConfig struct { @@ -276,6 +279,9 @@ type P2PConfig struct { // TxAnnouncementOnly is used to only announce transactions to peers TxAnnouncementOnly bool `hcl:"txannouncementonly,optional" toml:"txannouncementonly,optional"` + + // DisableTxPropagation disables transaction broadcast and announcement completely to its peers + DisableTxPropagation bool `hcl:"disable-tx-propagation,optional" toml:"disable-tx-propagation,optional"` } 
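For reference, a relay-style deployment would typically combine the new Relay block with the P2P DisableTxPropagation switch shown above. A minimal sketch, assuming it sits alongside DefaultConfig() in this package; the helper name and the endpoint URLs are illustrative, not part of this change:

func exampleRelayConfig() *Config {
	cfg := DefaultConfig()
	// Forward incoming transactions to block producers for preconfirmation.
	cfg.Relay.EnablePreconfs = true
	// Or relay them privately instead; the two modes are configured independently.
	cfg.Relay.EnablePrivateTx = false
	// Block producer RPC endpoints the relay submits to (placeholders).
	cfg.Relay.BlockProducerRpcEndpoints = []string{
		"http://bp-1.example:8545",
		"http://bp-2.example:8545",
	}
	// A node acting purely as a relay may also want to stop gossiping
	// transactions to its peers entirely.
	cfg.P2P.DisableTxPropagation = true
	return cfg
}
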
type P2PDiscovery struct { @@ -474,6 +480,12 @@ type JsonRPCConfig struct { // Maximum allowed timeout for eth_sendRawTransactionSync (e.g. 5m) TxSyncMaxTimeout time.Duration `hcl:"-,optional" toml:"-"` TxSyncMaxTimeoutRaw string `hcl:"txsync.maxtimeout,optional" toml:"txsync.maxtimeout,optional"` + + // AcceptPreconfTx allows the RPC server to accept preconf transactions + AcceptPreconfTx bool `hcl:"accept-preconf-tx,optional" toml:"accept-preconf-tx,optional"` + + // AcceptPrivateTx allows the RPC server to accept private transactions + AcceptPrivateTx bool `hcl:"accept-private-tx,optional" toml:"accept-private-tx,optional"` } type AUTHConfig struct { @@ -756,6 +768,17 @@ type WitnessConfig struct { FastForwardThreshold uint64 `hcl:"fastforwardthreshold,optional" toml:"fastforwardthreshold,optional"` } +type RelayConfig struct { + // EnablePreconfs enables relay to accept transactions for preconfs + EnablePreconfs bool `hcl:"enable-preconfs,optional" toml:"enable-preconfs,optional"` + + // EnablePrivateTx enables relaying transactions privately to block producers + EnablePrivateTx bool `hcl:"enable-private-tx,optional" toml:"enable-private-tx,optional"` + + // BlockProducerRpcEndpoints is a list of block producer rpc endpoints to submit transactions to + BlockProducerRpcEndpoints []string `hcl:"bp-rpc-endpoints,optional" toml:"bp-rpc-endpoints,optional"` +} + func DefaultConfig() *Config { return &Config{ Chain: "mainnet", @@ -781,15 +804,16 @@ func DefaultConfig() *Config { RPCBatchLimit: 100, RPCReturnDataLimit: 100000, P2P: &P2PConfig{ - MaxPeers: 50, - MaxPendPeers: 50, - Bind: "0.0.0.0", - Port: 30303, - NoDiscover: false, - NAT: "any", - NetRestrict: "", - TxArrivalWait: 500 * time.Millisecond, - TxAnnouncementOnly: false, + MaxPeers: 50, + MaxPendPeers: 50, + Bind: "0.0.0.0", + Port: 30303, + NoDiscover: false, + NAT: "any", + NetRestrict: "", + TxArrivalWait: 500 * time.Millisecond, + TxAnnouncementOnly: false, + DisableTxPropagation: false, Discovery: &P2PDiscovery{ DiscoveryV4: true, DiscoveryV5: true, @@ -868,6 +892,8 @@ func DefaultConfig() *Config { EnablePersonal: false, TxSyncDefaultTimeout: ethconfig.Defaults.TxSyncDefaultTimeout, TxSyncMaxTimeout: ethconfig.Defaults.TxSyncMaxTimeout, + AcceptPreconfTx: false, + AcceptPrivateTx: false, Http: &APIConfig{ Enabled: false, Port: 8545, @@ -1002,6 +1028,11 @@ func DefaultConfig() *Config { MinPeerThreshold: 0, WarnPeerThreshold: 0, }, + Relay: &RelayConfig{ + EnablePreconfs: false, + EnablePrivateTx: false, + BlockProducerRpcEndpoints: []string{}, + }, } } @@ -1550,6 +1581,15 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.DisableBlindForkValidation = c.DisableBlindForkValidation n.MaxBlindForkValidationLimit = c.MaxBlindForkValidationLimit + // Set preconf / private transaction flags for relay + n.EnablePreconfs = c.Relay.EnablePreconfs + n.EnablePrivateTx = c.Relay.EnablePrivateTx + n.BlockProducerRpcEndpoints = c.Relay.BlockProducerRpcEndpoints + + // Set preconf / private transaction flags for block producers + n.AcceptPreconfTx = c.JsonRPC.AcceptPreconfTx + n.AcceptPrivateTx = c.JsonRPC.AcceptPrivateTx + return &n, nil } @@ -1723,13 +1763,14 @@ func (c *Config) buildNode() (*node.Config, error) { AllowUnprotectedTxs: c.JsonRPC.AllowUnprotectedTxs, EnablePersonal: c.JsonRPC.EnablePersonal, P2P: p2p.Config{ - MaxPeers: int(c.P2P.MaxPeers), - MaxPendingPeers: int(c.P2P.MaxPendPeers), - ListenAddr: c.P2P.Bind + ":" + strconv.Itoa(int(c.P2P.Port)), - DiscoveryV4: 
c.P2P.Discovery.DiscoveryV4, - DiscoveryV5: c.P2P.Discovery.DiscoveryV5, - TxArrivalWait: c.P2P.TxArrivalWait, - TxAnnouncementOnly: c.P2P.TxAnnouncementOnly, + MaxPeers: int(c.P2P.MaxPeers), + MaxPendingPeers: int(c.P2P.MaxPendPeers), + ListenAddr: c.P2P.Bind + ":" + strconv.Itoa(int(c.P2P.Port)), + DiscoveryV4: c.P2P.Discovery.DiscoveryV4, + DiscoveryV5: c.P2P.Discovery.DiscoveryV5, + TxArrivalWait: c.P2P.TxArrivalWait, + TxAnnouncementOnly: c.P2P.TxAnnouncementOnly, + DisableTxPropagation: c.P2P.DisableTxPropagation, }, HTTPModules: c.JsonRPC.Http.API, HTTPCors: c.JsonRPC.Http.Cors, diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 5984ff9398..d697b9506e 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -773,6 +773,20 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { Default: c.cliConfig.JsonRPC.Graphql.VHost, Group: "JsonRPC", }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "accept-preconf-tx", + Usage: "Allows the RPC server to accept transactions for preconfirmation", + Value: &c.cliConfig.JsonRPC.AcceptPreconfTx, + Default: c.cliConfig.JsonRPC.AcceptPreconfTx, + Group: "JsonRPC", + }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "accept-private-tx", + Usage: "Allows the RPC server to accept private transactions", + Value: &c.cliConfig.JsonRPC.AcceptPrivateTx, + Default: c.cliConfig.JsonRPC.AcceptPrivateTx, + Group: "JsonRPC", + }) // http options f.BoolFlag(&flagset.BoolFlag{ @@ -984,6 +998,13 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { Default: c.cliConfig.P2P.TxAnnouncementOnly, Group: "P2P", }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "disable-tx-propagation", + Usage: "Disable transaction broadcast and announcements to all peers", + Value: &c.cliConfig.P2P.DisableTxPropagation, + Default: c.cliConfig.P2P.DisableTxPropagation, + Group: "P2P", + }) f.SliceStringFlag(&flagset.SliceStringFlag{ Name: "discovery.dns", Usage: "Comma separated list of enrtree:// URLs which will be queried for nodes to connect to", @@ -1316,5 +1337,28 @@ func (c *Command) Flags(config *Config) *flagset.Flagset { Group: "Health", }) + // Relay related flags + f.BoolFlag(&flagset.BoolFlag{ + Name: "relay.enable-preconfs", + Usage: "Enable transaction preconfirmations", + Value: &c.cliConfig.Relay.EnablePreconfs, + Default: c.cliConfig.Relay.EnablePreconfs, + Group: "P2P", + }) + f.BoolFlag(&flagset.BoolFlag{ + Name: "relay.enable-private-tx", + Usage: "Enable private transaction submission", + Value: &c.cliConfig.Relay.EnablePrivateTx, + Default: c.cliConfig.Relay.EnablePrivateTx, + Group: "P2P", + }) + f.SliceStringFlag(&flagset.SliceStringFlag{ + Name: "relay.bp-rpc-endpoints", + Usage: "Comma separated rpc endpoints of all block producers", + Value: &c.cliConfig.Relay.BlockProducerRpcEndpoints, + Default: c.cliConfig.Relay.BlockProducerRpcEndpoints, + Group: "P2P", + }) + return f } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 107076ca64..62b5f24646 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -41,6 +41,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -260,6 +261,16 @@ func (api *TxPoolAPI) Status() map[string]hexutil.Uint { } } +// TxStatus returns the current status of a transaction in the pool given 
a transaction hash. +// Returns +// - 0 if status is unknown. +// - 1 if status is queued. +// - 2 if status is pending. +// Note that because it only checks in txpool, it doesn't return 'included' status. +func (api *TxPoolAPI) TxStatus(hash common.Hash) txpool.TxStatus { + return api.b.TxStatus(hash) +} + // Inspect retrieves the content of the transaction pool and flattens it into an // easily inspectable list. func (api *TxPoolAPI) Inspect() map[string]map[string]map[string]string { @@ -2011,6 +2022,21 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c log.Info("Submitted transaction", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "recipient", tx.To(), "value", tx.Value()) } + // If preconf / private tx is enabled, submit tx directly to BP + if b.PreconfEnabled() { + // Preconf processing mostly happens in the background so don't surface the error to the user + if err := b.SubmitTxForPreconf(tx); err != nil { + log.Error("Transaction accepted locally but submission for preconf failed", "err", err) + } + } else if b.PrivateTxEnabled() { + // Return an error here to inform the user that private tx submission failed as it is critical. + // Note that it will be retried in the background. + if err := b.SubmitPrivateTx(tx); err != nil { + log.Error("Private tx accepted locally but submission failed", "err", err) + return tx.Hash(), fmt.Errorf("private tx accepted locally, submission failed. reason: %w", err) + } + } + return tx.Hash(), nil } @@ -2201,6 +2227,77 @@ func (api *TransactionAPI) SendRawTransactionSync(ctx context.Context, input hex } } +// SendRawTransactionForPreconf will accept a preconf transaction from relay if enabled. It will +// offer a soft inclusion confirmation if the transaction is accepted into the pending pool. +func (api *TransactionAPI) SendRawTransactionForPreconf(ctx context.Context, input hexutil.Bytes) (map[string]interface{}, error) { + if !api.b.AcceptPreconfTxs() { + return nil, errors.New("preconf transactions are not accepted on this node") + } + + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(input); err != nil { + return nil, err + } + + hash, err := SubmitTransaction(ctx, api.b, tx) + // If it's any error except `ErrAlreadyKnown`, return the error. + if err != nil && !errors.Is(err, txpool.ErrAlreadyKnown) { + return nil, err + } + + if errors.Is(err, txpool.ErrAlreadyKnown) { + // If the tx is already known, update the hash and skip the wait + // before checking the tx pool status. + hash = tx.Hash() + } else { + // Check tx status after a small delay for internal pool rearrangements + // TODO: try to have a better estimate for this or replace with a subscription + time.Sleep(100 * time.Millisecond) + } + + txStatus := api.b.TxStatus(hash) + var txConfirmed bool + if txStatus == txpool.TxStatusPending { + txConfirmed = true + } + + return map[string]interface{}{ + "hash": hash, + "preconfirmed": txConfirmed, + }, nil +} + +// SendRawTransactionPrivate will accept a private transaction from relay if enabled. It will ensure +// that the transaction is not gossiped over the public network.
+func (api *TransactionAPI) SendRawTransactionPrivate(ctx context.Context, input hexutil.Bytes) (common.Hash, error) { + if !api.b.AcceptPrivateTxs() { + return common.Hash{}, errors.New("private transactions are not accepted on this node") + } + + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(input); err != nil { + return common.Hash{}, err + } + + // Track the tx hash to ensure it is not gossiped in public + api.b.RecordPrivateTx(tx.Hash()) + + hash, err := SubmitTransaction(ctx, api.b, tx) + if err != nil { + // Purge tx from private tx tracker if submission failed + api.b.PurgePrivateTx(tx.Hash()) + } + + return hash, err +} + +func (api *TransactionAPI) CheckPreconfStatus(ctx context.Context, hash common.Hash) (bool, error) { + if !api.b.PreconfEnabled() { + return false, errors.New("preconf transactions are not accepted on this node") + } + return api.b.CheckPreconfStatus(hash) +} + // Sign calculates an ECDSA signature for: // keccak256("\x19Ethereum Signed Message:\n" + len(message) + message). // diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 83b6e5318b..edd9175ab8 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -50,6 +50,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -484,6 +485,16 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E return backend } +func (b testBackend) PreconfEnabled() bool { return false } +func (b testBackend) SubmitTxForPreconf(tx *types.Transaction) error { return nil } +func (b testBackend) CheckPreconfStatus(hash common.Hash) (bool, error) { return false, nil } +func (b testBackend) PrivateTxEnabled() bool { return false } +func (b testBackend) SubmitPrivateTx(tx *types.Transaction) error { return nil } +func (b testBackend) AcceptPreconfTxs() bool { return false } +func (b testBackend) AcceptPrivateTxs() bool { return false } +func (b testBackend) RecordPrivateTx(hash common.Hash) {} +func (b testBackend) PurgePrivateTx(hash common.Hash) {} + func (b testBackend) SyncProgress(ctx context.Context) ethereum.SyncProgress { return ethereum.SyncProgress{} } @@ -688,6 +699,9 @@ func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, m func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { panic("implement me") } +func (b testBackend) TxStatus(hash common.Hash) txpool.TxStatus { + panic("implement me") +} func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { panic("implement me") } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index a44856f1ff..25d3ff5f97 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" @@ -60,6 +61,19 @@ type Backend interface { RPCTxSyncDefaultTimeout() time.Duration RPCTxSyncMaxTimeout() time.Duration + // Preconf / Private tx related API for relay + 
PreconfEnabled() bool + SubmitTxForPreconf(tx *types.Transaction) error + CheckPreconfStatus(hash common.Hash) (bool, error) + PrivateTxEnabled() bool + SubmitPrivateTx(tx *types.Transaction) error + + // Preconf / Private tx related API for block producers + AcceptPreconfTxs() bool + AcceptPrivateTxs() bool + RecordPrivateTx(hash common.Hash) + PurgePrivateTx(hash common.Hash) + // Blockchain API SetHead(number uint64) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) @@ -91,6 +105,7 @@ type Backend interface { Stats() (pending int, queued int) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) + TxStatus(hash common.Hash) txpool.TxStatus SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription ChainConfig() *params.ChainConfig diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 4afe6fec89..accac1b5bb 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/core/filtermaps" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/stateless" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" @@ -318,6 +319,16 @@ func (b *backendMock) setFork(fork string) error { return nil } +func (b *backendMock) PreconfEnabled() bool { return false } +func (b *backendMock) SubmitTxForPreconf(tx *types.Transaction) error { return nil } +func (b *backendMock) CheckPreconfStatus(hash common.Hash) (bool, error) { return false, nil } +func (b *backendMock) PrivateTxEnabled() bool { return false } +func (b *backendMock) SubmitPrivateTx(tx *types.Transaction) error { return nil } +func (b *backendMock) AcceptPreconfTxs() bool { return false } +func (b *backendMock) AcceptPrivateTxs() bool { return false } +func (b *backendMock) RecordPrivateTx(hash common.Hash) {} +func (b *backendMock) PurgePrivateTx(hash common.Hash) {} + func (b *backendMock) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { return big.NewInt(42), nil } @@ -416,6 +427,9 @@ func (b *backendMock) TxPoolContent() (map[common.Address][]*types.Transaction, func (b *backendMock) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { return nil, nil } +func (b *backendMock) TxStatus(hash common.Hash) txpool.TxStatus { + return txpool.TxStatusUnknown +} func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil } func (b *backendMock) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return nil } func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { diff --git a/p2p/config.go b/p2p/config.go index 80fe658a8e..d7e29b8423 100644 --- a/p2p/config.go +++ b/p2p/config.go @@ -133,6 +133,9 @@ type Config struct { // TxAnnouncementOnly is used to only announce transactions to peers TxAnnouncementOnly bool + + // DisableTxPropagation disables transaction broadcast and announcement completely to its peers + DisableTxPropagation bool } type configMarshaling struct {
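For reference, the endpoints added above can be exercised with a plain JSON-RPC client. A minimal sketch, assuming go-ethereum's usual namespace_method naming (TransactionAPI under "eth", TxPoolAPI under "txpool"); the URLs, which node serves which flag, and the raw transaction bytes are placeholders rather than part of this change:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	ctx := context.Background()

	// Block producer started with --accept-preconf-tx (placeholder URL).
	bp, err := rpc.Dial("http://bp.example:8545")
	if err != nil {
		panic(err)
	}
	defer bp.Close()

	rawTx := hexutil.Bytes{} // a signed, binary-encoded transaction goes here

	// Ask the block producer for a soft inclusion confirmation.
	var res struct {
		Hash         common.Hash `json:"hash"`
		Preconfirmed bool        `json:"preconfirmed"`
	}
	if err := bp.CallContext(ctx, &res, "eth_sendRawTransactionForPreconf", rawTx); err != nil {
		panic(err)
	}
	fmt.Println("hash:", res.Hash, "preconfirmed:", res.Preconfirmed)

	// The lightweight pool status endpoint: 0 unknown, 1 queued, 2 pending.
	var status uint
	if err := bp.CallContext(ctx, &status, "txpool_txStatus", res.Hash); err != nil {
		panic(err)
	}
	fmt.Println("pool status:", status)

	// On a relay node started with --relay.enable-preconfs, the cached preconf
	// result for the same hash can be queried separately.
	relayNode, err := rpc.Dial("http://relay.example:8545")
	if err != nil {
		panic(err)
	}
	defer relayNode.Close()

	var confirmed bool
	if err := relayNode.CallContext(ctx, &confirmed, "eth_checkPreconfStatus", res.Hash); err != nil {
		fmt.Println("preconf status check failed:", err)
	}
	fmt.Println("relay preconf status:", confirmed)
}

eth_sendRawTransactionPrivate would follow the same call pattern against a node started with --accept-private-tx, returning only the transaction hash.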