diff --git a/blockchain/indexers/flatutreexoproofindex.go b/blockchain/indexers/flatutreexoproofindex.go index d27a088b..e8d0ab4b 100644 --- a/blockchain/indexers/flatutreexoproofindex.go +++ b/blockchain/indexers/flatutreexoproofindex.go @@ -11,7 +11,6 @@ import ( "fmt" "os" "path/filepath" - "reflect" "sync" "time" @@ -41,11 +40,6 @@ const ( // proof index. This name is used as the dataFile name in the flat files. flatUtreexoUndoName = "undo" - // flatRememberIdxName is the name given to the remember idx data of the flat - // utreexo proof index. This name is used as the dataFile name in the flat - // files. - flatRememberIdxName = "remember" - // flatUtreexoProofStatsName is the name given to the proof stats data of the flat // utreexo proof index. This name is used as the dataFile name in the flat // files. @@ -82,12 +76,10 @@ var _ NeedsInputser = (*FlatUtreexoProofIndex)(nil) // FlatUtreexoProofIndex implements a utreexo accumulator proof index for all the blocks. // In a flat file. type FlatUtreexoProofIndex struct { - proofGenInterVal int32 - proofState FlatFileState - undoState FlatFileState - rememberIdxState FlatFileState - proofStatsState FlatFileState - rootsState FlatFileState + proofState FlatFileState + undoState FlatFileState + proofStatsState FlatFileState + rootsState FlatFileState // All the configurable metadata. 
config *UtreexoConfig @@ -149,17 +141,6 @@ func (idx *FlatUtreexoProofIndex) consistentFlatFileState(tipHeight int32) error } } - if idx.rememberIdxState.BestHeight() != 0 && - tipHeight < idx.rememberIdxState.BestHeight() { - bestHeight := idx.rememberIdxState.BestHeight() - for tipHeight != bestHeight && bestHeight > 0 { - err := idx.rememberIdxState.DisconnectBlock(bestHeight) - if err != nil { - return err - } - bestHeight-- - } - } if idx.proofStatsState.BestHeight() != 0 && tipHeight < idx.proofStatsState.BestHeight() { bestHeight := idx.proofStatsState.BestHeight() @@ -369,18 +350,17 @@ func (idx *FlatUtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil. for _, ld := range ud.LeafDatas { delHashes = append(delHashes, ld.LeafHash()) } - // For pruned nodes and for multi-block proofs, we need to save the - // undo block in order to undo a block on reorgs. If we have all the - // proofs block by block, that data can be used for reorgs but these - // two modes will not have the proofs available. - if idx.config.Pruned || idx.proofGenInterVal != 1 { + // For pruned nodes, we need to save the undo block in order to undo + // a block on reorgs. If we have all the proofs block by block, that + // data can be used for reorgs but a pruned node will not have the + // proofs available. + if idx.config.Pruned { err = idx.storeUndoBlock(block.Height(), uint64(len(adds)), ud.AccProof.Targets, delHashes) if err != nil { return err } } else { - // Even if we are an err = idx.storeUndoBlock(block.Height(), 0, nil, nil) if err != nil { return err @@ -418,26 +398,9 @@ func (idx *FlatUtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil. return err } - // If the interval is 1, then just save the utreexo proof and we're done. - if idx.proofGenInterVal == 1 { - err = idx.storeProof(block.Height(), false, ud) - if err != nil { - return err - } - } else { - // Every proof generation interval, we'll make a multi-block proof. 
- if (block.Height() % idx.proofGenInterVal) == 0 { - err = idx.MakeMultiBlockProof(block.Height(), block.Height()-idx.proofGenInterVal, - block, ud, stxos) - if err != nil { - return err - } - } else { - err = idx.storeProof(block.Height(), true, ud) - if err != nil { - return err - } - } + err = idx.storeProof(block.Height(), false, ud) + if err != nil { + return err } return nil @@ -477,65 +440,6 @@ func (idx *FlatUtreexoProofIndex) fetchBlocks(start, end int32) ( return blocks, allStxos, nil } -// deletionsToProve returns all the deletions that need to be proven from the given -// blocks and stxos. -func (idx *FlatUtreexoProofIndex) deletionsToProve(blocks []*btcutil.Block, - stxos [][]blockchain.SpentTxOut) ([]wire.LeafData, [][]uint32, error) { - - // Check that the length is equal to prevent index errors in the below loop. - if len(blocks) != len(stxos) { - err := fmt.Errorf("Got %d blocks but %d stxos", len(blocks), len(stxos)) - return nil, nil, err - } - - // createdMap will keep track of all the utxos created in the blocks that were - // passed in. 
- createdMap := make(map[wire.OutPoint]uint32) - - remembers := make([][]uint32, len(blocks)) - - var delsToProve []wire.LeafData - for i, block := range blocks { - _, _, inskip, outskip := blockchain.DedupeBlock(block) - - var txOutBlockIdx uint32 - for _, tx := range block.Transactions() { - for outIdx := range tx.MsgTx().TxOut { - // Skip txos on the skip list - if len(outskip) > 0 && outskip[0] == txOutBlockIdx { - outskip = outskip[1:] - txOutBlockIdx++ - continue - } - - op := wire.OutPoint{Hash: *tx.Hash(), Index: uint32(outIdx)} - createdMap[op] = txOutBlockIdx - txOutBlockIdx++ - } - } - - excludeAfter := block.Height() - (block.Height() % idx.proofGenInterVal) - - dels, excludes, err := blockchain.BlockToDelLeaves(stxos[i], idx.chain, - block, inskip, excludeAfter) - if err != nil { - return nil, nil, err - } - - for _, excluded := range excludes { - val, ok := createdMap[excluded.Outpoint] - if ok { - idx := excluded.Height - excludeAfter - remembers[idx] = append(remembers[idx], val) - } - } - - delsToProve = append(delsToProve, dels...) - } - - return delsToProve, remembers, nil -} - // attachBlock attaches the passed in block to the utreexo accumulator state. func (idx *FlatUtreexoProofIndex) attachBlock(blk *btcutil.Block, stxos []blockchain.SpentTxOut) error { _, outCount, inskip, outskip := blockchain.DedupeBlock(blk) @@ -711,89 +615,8 @@ func (idx *FlatUtreexoProofIndex) undoUtreexoState(currentHeight, desiredHeight return nil } -// MakeMultiBlockProof reverses the utreexo accumulator state to the multi-block proof -// generation height and makes a proof of all the stxos in the upcoming interval. The -// utreexo state is caught back up to the current height after the mulit-block proof is -// generated. 
-func (idx *FlatUtreexoProofIndex) MakeMultiBlockProof(currentHeight, proveHeight int32, - block *btcutil.Block, currentUD *wire.UData, stxos []blockchain.SpentTxOut) error { - - idx.mtx.Lock() - defer idx.mtx.Unlock() - - startRoots := idx.utreexoState.state.GetRoots() - - // Go back to the desired block to generate the multi-block proof. - err := idx.undoUtreexoState(currentHeight, proveHeight) - if err != nil { - return err - } - - blocks, allStxos, err := idx.fetchBlocks(proveHeight, currentHeight) - if err != nil { - return err - } - - if int32(len(blocks)) != idx.proofGenInterVal { - err := fmt.Errorf("Only fetched %d blocks but the proofGenInterVal is %d", - len(blocks), idx.proofGenInterVal) - panic(err) - } - - delsToProve, remembers, err := idx.deletionsToProve(blocks, allStxos) - if err != nil { - return err - } - - ud, err := wire.GenerateUData(delsToProve, idx.utreexoState.state) - if err != nil { - panic(err) - } - - delHashes := make([]utreexo.Hash, 0, len(delsToProve)) - for _, del := range delsToProve { - delHashes = append(delHashes, del.LeafHash()) - } - - // Store the proof that we have created. - err = idx.storeMultiBlockProof(currentHeight, currentUD, ud, delHashes) - if err != nil { - return err - } - - // Store the cache that we have created. - err = idx.storeRemembers(remembers, proveHeight) - if err != nil { - return err - } - - // Re-sync all the reorged blocks. - err = idx.reattachToUtreexoState(blocks, allStxos) - if err != nil { - return err - } - - // Attach the current block. - err = idx.attachBlock(block, stxos) - if err != nil { - return err - } - - // Sanity check. - endRoots := idx.utreexoState.state.GetRoots() - if !reflect.DeepEqual(endRoots, startRoots) { - err := fmt.Errorf("MakeMultiBlockProof: start roots and endroots differ. " + - "Likely that the database is corrupted.") - panic(err) - } - - idx.pStats.UpdateMultiUDStats(len(delsToProve), ud) - - return nil -} - -// getUndoData returns the data needed for undo. 
For pruned nodes and multi-block proof enabled nodes, -// we fetch the data from the undo block. For archive nodes, we generate the data from the proof. +// getUndoData returns the data needed for undo. For pruned nodes, we fetch the data from the undo block. +// For archive nodes, we generate the data from the proof. func (idx *FlatUtreexoProofIndex) getUndoData(block *btcutil.Block) (uint64, []uint64, []utreexo.Hash, error) { var ( numAdds uint64 @@ -801,7 +624,7 @@ func (idx *FlatUtreexoProofIndex) getUndoData(block *btcutil.Block) (uint64, []u delHashes []utreexo.Hash ) - if !idx.config.Pruned || idx.proofGenInterVal != 1 { + if !idx.config.Pruned { ud, err := idx.FetchUtreexoProof(block.Height(), false) if err != nil { return 0, nil, nil, err @@ -863,9 +686,8 @@ func (idx *FlatUtreexoProofIndex) DisconnectBlock(dbTx database.Tx, block *btcut // Check if we're at a height where proof was generated. Only check if we're not // pruned as we don't keep the historical proofs as a pruned node. - if (block.Height()%idx.proofGenInterVal) == 0 && !idx.config.Pruned { - height := block.Height() / idx.proofGenInterVal - err = idx.proofState.DisconnectBlock(height) + if !idx.config.Pruned { + err = idx.proofState.DisconnectBlock(block.Height()) if err != nil { return err } @@ -1014,11 +836,6 @@ func (idx *FlatUtreexoProofIndex) FetchMultiUtreexoProof(height int32) ( return nil, nil, nil, fmt.Errorf("Cannot fetch historical proof as the node is pruned") } - if height%idx.proofGenInterVal != 0 { - return nil, nil, nil, fmt.Errorf("Attempting to fetch multi-block proof at the wrong height "+ - "height:%d, proofGenInterVal:%d", height, idx.proofGenInterVal) - } - // Fetch the serialized data. proofBytes, err := idx.proofState.FetchData(height) if err != nil { @@ -1062,24 +879,6 @@ func (idx *FlatUtreexoProofIndex) FetchMultiUtreexoProof(height int32) ( return ud, multiUd, dels, nil } -// FetchRemembers fetches the remember indexes of the desired block height. 
-func (idx *FlatUtreexoProofIndex) FetchRemembers(height int32) ([]uint32, error) { - // Fetch the raw bytes. - rememberBytes, err := idx.rememberIdxState.FetchData(height) - if err != nil { - return nil, err - } - - // Deserialize the raw bytes into a uint32 slice. - r := bytes.NewReader(rememberBytes) - remembers, err := wire.DeserializeRemembers(r) - if err != nil { - return nil, err - } - - return remembers, nil -} - // storeProof serializes and stores the utreexo data in the proof state. func (idx *FlatUtreexoProofIndex) storeProof(height int32, excludeAccProof bool, ud *wire.UData) error { if excludeAccProof { @@ -1185,31 +984,6 @@ func (idx *FlatUtreexoProofIndex) storeRoots(height int32, p utreexo.Utreexo) er return nil } -// storeRemembers serializes and stores the remember indexes in the remember index state. -func (idx *FlatUtreexoProofIndex) storeRemembers(remembers [][]uint32, startHeight int32) error { - for i, remember := range remembers { - if startHeight == 0 && i == 0 { - continue - } - - // Remember indexes size. - size := wire.SerializeRemembersSize(remember) - bytesBuf := bytes.NewBuffer(make([]byte, 0, size)) - - err := wire.SerializeRemembers(bytesBuf, remember) - if err != nil { - return err - } - - err = idx.rememberIdxState.StoreData(startHeight+int32(i), bytesBuf.Bytes()) - if err != nil { - return fmt.Errorf("store remembers err. %v", err) - } - } - - return nil -} - // fetchUndoBlock returns the undoblock for the given block height. func (idx *FlatUtreexoProofIndex) fetchUndoBlock(height int32) (uint64, []uint64, []utreexo.Hash, error) { if height == 0 { @@ -1367,19 +1141,10 @@ func loadFlatFileState(dataDir, name string) (*FlatFileState, error) { // turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain. 
func NewFlatUtreexoProofIndex(pruned bool, chainParams *chaincfg.Params, - proofGenInterVal *int32, maxMemoryUsage int64, dataDir string, flush func() error) (*FlatUtreexoProofIndex, error) { - - // If the proofGenInterVal argument is nil, use the default value. - var intervalToUse int32 - if proofGenInterVal != nil { - intervalToUse = *proofGenInterVal - } else { - intervalToUse = defaultProofGenInterval - } + maxMemoryUsage int64, dataDir string, flush func() error) (*FlatUtreexoProofIndex, error) { idx := &FlatUtreexoProofIndex{ - proofGenInterVal: intervalToUse, - mtx: new(sync.RWMutex), + mtx: new(sync.RWMutex), config: &UtreexoConfig{ MaxMemoryUsage: maxMemoryUsage, Params: chainParams, @@ -1406,13 +1171,6 @@ func NewFlatUtreexoProofIndex(pruned bool, chainParams *chaincfg.Params, } idx.undoState = *undoState - // Init the remember idx state. - rememberIdxState, err := loadFlatFileState(dataDir, flatRememberIdxName) - if err != nil { - return nil, err - } - idx.rememberIdxState = *rememberIdxState - proofStatsState, err := loadFlatFileState(dataDir, flatUtreexoProofStatsName) if err != nil { return nil, err @@ -1453,12 +1211,6 @@ func DropFlatUtreexoProofIndex(db database.DB, dataDir string, interrupt <-chan return err } - rememberIdxPath := flatFilePath(dataDir, flatRememberIdxName) - err = deleteFlatFile(rememberIdxPath) - if err != nil { - return err - } - proofStatsPath := flatFilePath(dataDir, flatUtreexoProofStatsName) err = deleteFlatFile(proofStatsPath) if err != nil { diff --git a/blockchain/indexers/indexers_test.go b/blockchain/indexers/indexers_test.go index 5c0ab9fc..81d6553e 100644 --- a/blockchain/indexers/indexers_test.go +++ b/blockchain/indexers/indexers_test.go @@ -70,12 +70,10 @@ func createDB(dbName string) (database.DB, string, error) { return db, dbPath, nil } -func initIndexes(interval int32, dbPath string, db database.DB, params *chaincfg.Params) ( +func initIndexes(dbPath string, db database.DB, params *chaincfg.Params) ( *Manager, 
[]Indexer, error) { - proofGenInterval := new(int32) - *proofGenInterval = interval - flatUtreexoProofIndex, err := NewFlatUtreexoProofIndex(false, params, proofGenInterval, 50*1024*1024, dbPath, db.Flush) + flatUtreexoProofIndex, err := NewFlatUtreexoProofIndex(false, params, 50*1024*1024, dbPath, db.Flush) if err != nil { return nil, nil, err } @@ -93,7 +91,7 @@ func initIndexes(interval int32, dbPath string, db database.DB, params *chaincfg return indexManager, indexes, nil } -func indexersTestChain(testName string, proofGenInterval int32) (*blockchain.BlockChain, []Indexer, *chaincfg.Params, *Manager, func()) { +func indexersTestChain(testName string) (*blockchain.BlockChain, []Indexer, *chaincfg.Params, *Manager, func()) { params := chaincfg.RegressionNetParams params.CoinbaseMaturity = 1 @@ -109,7 +107,7 @@ func indexersTestChain(testName string, proofGenInterval int32) (*blockchain.Blo } // Create the indexes to be used in the chain. - indexManager, indexes, err := initIndexes(proofGenInterval, dbPath, db, ¶ms) + indexManager, indexes, err := initIndexes(dbPath, db, ¶ms) if err != nil { tearDown() os.RemoveAll(testDbRoot) @@ -337,113 +335,6 @@ func syncCsnChain(start, end int32, chainToSyncFrom, csnChain *blockchain.BlockC return nil } -// syncCsnChainMultiBlockProof will take in two chains: one to sync from, one to sync. -// Sync will be done from start to end using multi-block proofs. 
-func syncCsnChainMultiBlockProof(start, end, interval int32, chainToSyncFrom, csnChain *blockchain.BlockChain, - indexes []Indexer) error { - - for b := start; b <= end; b++ { - var err error - var ud, multiUd *wire.UData - var dels []utreexo.Hash - var remembers []uint32 - if (b % interval) == 0 { - for _, indexer := range indexes { - switch idxType := indexer.(type) { - case *FlatUtreexoProofIndex: - var roots utreexo.Stump - if b != 0 { - roots, err = idxType.fetchRoots(b) - if err != nil { - return err - } - } - - _, multiUd, dels, err = idxType.FetchMultiUtreexoProof(b + csnChain.GetUtreexoView().GetProofInterval()) - if err != nil { - return err - } - - ud, _, _, err = idxType.FetchMultiUtreexoProof(b) - if err != nil { - return err - } - - remembers, err = idxType.FetchRemembers(b) - if err != nil { - return err - } - - _, err = utreexo.Verify(roots, dels, multiUd.AccProof) - if err != nil { - panic(err) - } - } - } - err = csnChain.GetUtreexoView().AddProof(dels, &multiUd.AccProof) - if err != nil { - return fmt.Errorf("syncCsnChainMultiBlockProof err at height %d. err: %v:", b, err) - } - } else { - for _, indexer := range indexes { - switch idxType := indexer.(type) { - case *FlatUtreexoProofIndex: - ud, err = idxType.FetchUtreexoProof(b, true) - if err != nil { - return err - } - - remembers, err = idxType.FetchRemembers(b) - if err != nil { - return err - } - } - } - } - - // Fetch the raw block bytes from the database. - block, err := chainToSyncFrom.BlockByHeight(b) - if err != nil { - str := fmt.Errorf("Fail at block height %d err:%s\n", b, err) - return str - } - - ud.RememberIdx = remembers - block.MsgBlock().UData = ud - - _, _, err = csnChain.ProcessBlock(block, blockchain.BFNone) - if err != nil { - str := fmt.Errorf("ProcessBlock fail at block height %d err: %s\n", b, err) - return str - } - - // Fetch and compare the roots after processing the block. 
- var expectRoots []*chainhash.Hash - for _, indexer := range indexes { - switch idxType := indexer.(type) { - case *FlatUtreexoProofIndex: - // +1 because for bridge indexes, we save the root before the modify. - stump, err := idxType.fetchRoots(block.Height() + 1) - if err != nil { - return err - } - expectRoots = make([]*chainhash.Hash, len(stump.Roots)) - - for i, root := range stump.Roots { - newRoot := chainhash.Hash(root) - expectRoots[i] = &newRoot - } - } - } - if !csnChain.GetUtreexoView().Equal(expectRoots) { - return fmt.Errorf("expected roots %v but got %v on block %s(%d)", - expectRoots, csnChain.GetUtreexoView().GetRoots(), block.Hash().String(), block.Height()) - } - } - - return nil -} - // testUtreexoProof tests the generated proof on the exact same accumulator, // making sure that the verification code pass. func testUtreexoProof(block *btcutil.Block, chain *blockchain.BlockChain, indexes []Indexer) error { @@ -590,7 +481,7 @@ func TestProveUtxos(t *testing.T) { source := rand.NewSource(timenow) rand := rand.New(source) - chain, indexes, params, _, tearDown := indexersTestChain("TestProveUtxos", 1) + chain, indexes, params, _, tearDown := indexersTestChain("TestProveUtxos") defer tearDown() var allSpends []*blockchain.SpendableOut @@ -728,7 +619,7 @@ func TestUtreexoProofIndex(t *testing.T) { source := rand.NewSource(timenow) rand := rand.New(source) - chain, indexes, params, _, tearDown := indexersTestChain("TestUtreexoProofIndex", 1) + chain, indexes, params, _, tearDown := indexersTestChain("TestUtreexoProofIndex") defer tearDown() tip := btcutil.NewBlock(params.GenesisBlock) @@ -845,89 +736,11 @@ func TestUtreexoProofIndex(t *testing.T) { } } -func TestMultiBlockProof(t *testing.T) { - // Always remove the root on return. 
- defer os.RemoveAll(testDbRoot) - - timenow := time.Now().UnixNano() - source := rand.NewSource(timenow) - rand := rand.New(source) - - chain, indexes, params, _, tearDown := indexersTestChain("TestMultiBlockProof", defaultProofGenInterval) - defer tearDown() - - tip := btcutil.NewBlock(params.GenesisBlock) - - var allSpends []*blockchain.SpendableOut - nextBlock := tip - nextSpends := []*blockchain.SpendableOut{} - - // Create a chain with 100 blocks. - for b := 0; b < 100; b++ { - newBlock, newSpendableOuts, err := blockchain.AddBlock(chain, nextBlock, nextSpends) - if err != nil { - t.Fatalf("timenow:%v. %v", timenow, err) - } - nextBlock = newBlock - - allSpends = append(allSpends, newSpendableOuts...) - - var nextSpendsTmp []*blockchain.SpendableOut - for i := 0; i < len(allSpends); i++ { - randIdx := rand.Intn(len(allSpends)) - - spend := allSpends[randIdx] // get - allSpends = append(allSpends[:randIdx], allSpends[randIdx+1:]...) // delete - nextSpendsTmp = append(nextSpendsTmp, spend) - } - nextSpends = nextSpendsTmp - - if b%10 == 0 { - // Commit the two base blocks to DB - if err := chain.FlushUtxoCache(blockchain.FlushRequired); err != nil { - t.Fatalf("timenow %v. unexpected error while flushing cache: %v. Rand source %v", - timenow, err, source) - } - } - } - - // Create a chain that consumes the data from the indexes and test that this - // chain is able to consume the data properly. - csnChain, _, csnTearDown, err := csnTestChain("TestMultiBlockProof-CsnChain") - defer csnTearDown() - if err != nil { - str := fmt.Errorf("TestMultiBlockProof: csnTestChain err: %v. Rand source: %v", err, source) - t.Fatalf("timenow:%v. %v", timenow, str) - } - - csnChain.GetUtreexoView().SetProofInterval(defaultProofGenInterval) - - // Sync the csn chain to tip-1 from block 1. We can only sync til one block away from the tip - // as the bridge hasn't built a multiblock proof for the next 10 blocks. 
- err = syncCsnChainMultiBlockProof( - 1, chain.BestSnapshot().Height-1, defaultProofGenInterval, chain, csnChain, indexes) - if err != nil { - t.Fatalf("timenow %v. TestMultiBlockProof: syncCsnChainMultiBlockProof err: %v.", timenow, err) - } - - bridgeBlock, err := chain.BlockByHeight(csnChain.BestSnapshot().Height) - if err != nil { - t.Fatalf("timenow:%v. %v", timenow, err) - } - - // Sanity check that the csn chain did catch up to the bridge chain. - if *bridgeBlock.Hash() != csnChain.BestSnapshot().Hash { - t.Fatalf("timenow %v. expected tip to be %s(%d) but got %s(%d) for the csn chain", - timenow, bridgeBlock.Hash().String(), bridgeBlock.Height(), - csnChain.BestSnapshot().Hash.String(), csnChain.BestSnapshot().Height) - } -} - func TestBridgeNodePruneUndoDataGen(t *testing.T) { // Always remove the root on return. defer os.RemoveAll(testDbRoot) - chain, indexes, params, indexManager, tearDown := indexersTestChain("TestBridgeNodePruneUndoDataGen", 1) + chain, indexes, params, indexManager, tearDown := indexersTestChain("TestBridgeNodePruneUndoDataGen") defer tearDown() var allSpends []*blockchain.SpendableOut diff --git a/blockchain/indexers/utreexoproofstats.go b/blockchain/indexers/utreexoproofstats.go index 49a5207d..54075af0 100644 --- a/blockchain/indexers/utreexoproofstats.go +++ b/blockchain/indexers/utreexoproofstats.go @@ -10,29 +10,23 @@ import ( "io" "math" - "github.com/utreexo/utreexod/chaincfg/chainhash" "github.com/utreexo/utreexod/wire" ) -// proofStatsSize has 19 elements that are each 8 bytes big. -const proofStatsSize int = 8 * 19 +// proofStatsSize has 10 elements that are each 8 bytes big. +const proofStatsSize int = 8 * 10 // proofStats are the relevant proof statistics to check how big each proofs are. type proofStats struct { // The height of the chain for the below stats. BlockHeight uint64 - // The overhead of the multi interval proof. 
- MultiBlockProofOverheadSum float64 - MultiBlockProofCount uint64 - // The overhead of the single interval proof. BlockProofOverheadSum float64 BlockProofCount uint64 - // Total deletions vs the proven deletions by the multi-block proof. - TotalDels uint64 - TotalProvenDels uint64 + // Total deletions. + TotalDels uint64 // Size of all the leaf datas. LdSize uint64 @@ -45,18 +39,6 @@ type proofStats struct { // Size of all the proofs in the batchproofs. ProofSize uint64 ProofCount uint64 - - // Size of the multi-block targets. - MbTgSize uint64 - MbTgCount uint64 - - // Size of the multi-block proofs. - MbProofSize uint64 - MbProofCount uint64 - - // Size of the leafhashes for the multi-block proofs. - MbHashSize uint64 - MbHashCount uint64 } // UpdateTotalDelCount updates the deletion count in the proof stats. @@ -86,40 +68,15 @@ func (ps *proofStats) UpdateUDStats(excludeAccProof bool, ud *wire.UData) { ps.BlockProofOverheadSum += overhead } -// UpdateMultiUDStats updates the multi-block utreexo data statistics. -func (ps *proofStats) UpdateMultiUDStats(delCount int, multiUd *wire.UData) { - // Update target size. - ps.MbTgSize += uint64(wire.BatchProofSerializeTargetSize(&multiUd.AccProof)) - ps.MbTgCount += uint64(len(multiUd.AccProof.Targets)) - - // Update proof size. - ps.MbProofSize += uint64(wire.BatchProofSerializeAccProofSize(&multiUd.AccProof)) - ps.MbProofCount += uint64(len(multiUd.AccProof.Proof)) - - // Update multi-block proof overhead. - overhead := calcProofOverhead(multiUd) - ps.MultiBlockProofCount++ - ps.MultiBlockProofOverheadSum += overhead - - // Update the multi-block proof hash size. - ps.MbHashSize += uint64(delCount * chainhash.HashSize) - ps.MbHashCount += uint64(delCount) - - // Update proven dels by the multi-block proofs. - ps.TotalProvenDels += uint64(delCount) -} - // LogProofStats outputs a log of the proof statistics. 
func (ps *proofStats) LogProofStats() { - log.Infof("height %d: totalProvenPercentage %f, totalDels %d, totalProvenDels %d, ldSize %d, ldCount %d, tgSize %d, tgCount %d, proofSize %d, proofCount %d "+ - "mbTgSize %d, mbTgCount %d, mbProofSize %d, mbProofCount %d, mbHashSize %d, mbHashCount %d", - ps.BlockHeight, float64(ps.TotalProvenDels)/float64(ps.TotalDels), ps.TotalDels, ps.TotalProvenDels, ps.LdSize, ps.LdCount, ps.TgSize, ps.TgCount, - ps.ProofSize, ps.ProofCount, ps.MbTgSize, ps.MbTgCount, - ps.MbProofSize, ps.MbProofCount, ps.MbHashSize, ps.MbHashCount) - - log.Infof("height %d, average-blockoverhead %f, average-multiblockoverhead %f, blockoverhead-sum %f, blockcount %d, mboverhead-sum %f, mbCount %d", - ps.BlockHeight, ps.BlockProofOverheadSum/float64(ps.BlockProofCount), ps.MultiBlockProofOverheadSum/float64(ps.MultiBlockProofCount), - ps.BlockProofOverheadSum, ps.BlockProofCount, ps.MultiBlockProofOverheadSum, ps.MultiBlockProofCount) + log.Infof("height %d: totalDels %d, ldSize %d, ldCount %d, tgSize %d, tgCount %d, proofSize %d, proofCount %d", + ps.BlockHeight, ps.TotalDels, ps.LdSize, ps.LdCount, ps.TgSize, ps.TgCount, + ps.ProofSize, ps.ProofCount) + + log.Infof("height %d, average-blockoverhead %f, blockoverhead-sum %f, blockcount %d", + ps.BlockHeight, ps.BlockProofOverheadSum/float64(ps.BlockProofCount), + ps.BlockProofOverheadSum, ps.BlockProofCount) } // Serialize serializes the proof statistics into the writer. 
@@ -133,18 +90,6 @@ func (ps *proofStats) Serialize(w io.Writer) error { return err } - binary.BigEndian.PutUint64(buf[:], math.Float64bits(ps.MultiBlockProofOverheadSum)) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MultiBlockProofCount) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - binary.BigEndian.PutUint64(buf[:], math.Float64bits(ps.BlockProofOverheadSum)) _, err = w.Write(buf[:]) if err != nil { @@ -163,12 +108,6 @@ func (ps *proofStats) Serialize(w io.Writer) error { return err } - binary.BigEndian.PutUint64(buf[:], ps.TotalProvenDels) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - binary.BigEndian.PutUint64(buf[:], ps.LdSize) _, err = w.Write(buf[:]) if err != nil { @@ -205,42 +144,6 @@ func (ps *proofStats) Serialize(w io.Writer) error { return err } - binary.BigEndian.PutUint64(buf[:], ps.MbTgSize) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MbTgCount) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MbProofSize) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MbProofCount) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MbHashSize) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - - binary.BigEndian.PutUint64(buf[:], ps.MbHashCount) - _, err = w.Write(buf[:]) - if err != nil { - return err - } - return nil } @@ -255,19 +158,6 @@ func (ps *proofStats) Deserialize(r io.Reader) error { } ps.BlockHeight = binary.BigEndian.Uint64(buf[:]) - _, err = r.Read(buf[:]) - if err != nil { - return err - } - res = binary.BigEndian.Uint64(buf[:]) - ps.MultiBlockProofOverheadSum = math.Float64frombits(res) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MultiBlockProofCount = binary.BigEndian.Uint64(buf[:]) - _, err = 
r.Read(buf[:]) if err != nil { return err @@ -287,12 +177,6 @@ func (ps *proofStats) Deserialize(r io.Reader) error { } ps.TotalDels = binary.BigEndian.Uint64(buf[:]) - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.TotalProvenDels = binary.BigEndian.Uint64(buf[:]) - _, err = r.Read(buf[:]) if err != nil { return err @@ -329,42 +213,6 @@ func (ps *proofStats) Deserialize(r io.Reader) error { } ps.ProofCount = binary.BigEndian.Uint64(buf[:]) - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbTgSize = binary.BigEndian.Uint64(buf[:]) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbTgCount = binary.BigEndian.Uint64(buf[:]) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbProofSize = binary.BigEndian.Uint64(buf[:]) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbProofCount = binary.BigEndian.Uint64(buf[:]) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbHashSize = binary.BigEndian.Uint64(buf[:]) - - _, err = r.Read(buf[:]) - if err != nil { - return err - } - ps.MbHashCount = binary.BigEndian.Uint64(buf[:]) - return nil } diff --git a/blockchain/utreexoviewpoint.go b/blockchain/utreexoviewpoint.go index 4b4612a8..744339e3 100644 --- a/blockchain/utreexoviewpoint.go +++ b/blockchain/utreexoviewpoint.go @@ -22,10 +22,6 @@ import ( // UtreexoViewpoint is the compact state of the chainstate using the utreexo accumulator type UtreexoViewpoint struct { - // proofInterval is the interval of block in which to receive the - // accumulator proofs. Only relevant when in multiblock proof mode. - proofInterval int32 - // accumulator is the bare-minimum accumulator for the utxo set. // It only holds the root hashes and the number of elements in the // accumulator. @@ -52,12 +48,12 @@ func (uview *UtreexoViewpoint) ProcessUData(block *btcutil.Block, // Extracts the block into additions and deletions that will be processed. 
// Adds correspond to newly created UTXOs and dels correspond to STXOs. - adds, err := ExtractAccumulatorAdds(block, bestChain, ud.RememberIdx) + adds, err := ExtractAccumulatorAdds(block, bestChain, []uint32{}) if err != nil { return err } - dels, err := ExtractAccumulatorDels(block, bestChain, ud.RememberIdx) + dels, err := ExtractAccumulatorDels(block, bestChain, []uint32{}) if err != nil { return err } @@ -82,7 +78,7 @@ func (uview *UtreexoViewpoint) VerifyUData(block *btcutil.Block, // Extracts the block into additions and deletions that will be processed. // Adds correspond to newly created UTXOs and dels correspond to STXOs. - dels, err := ExtractAccumulatorDels(block, bestChain, ud.RememberIdx) + dels, err := ExtractAccumulatorDels(block, bestChain, []uint32{}) if err != nil { return err } @@ -109,12 +105,6 @@ func (uview *UtreexoViewpoint) VerifyUData(block *btcutil.Block, } } - // TODO we should be verifying here but aren't as the accumulator.Verify - // function expects a complete proof. - if uview.proofInterval != 1 { - return nil - } - return uview.accumulator.Verify(dels, ud.AccProof, false) } @@ -142,33 +132,23 @@ func (uview *UtreexoViewpoint) Modify(ud *wire.UData, addHashes[i] = add.Hash } - var err error + // We have to do this in order to generate the update data for the wallet. + // TODO: get rid of this once the pollard can generate the update data. + s := uview.accumulator.GetStump() var updateData utreexo.UpdateData - if uview.proofInterval == 1 { - err = uview.accumulator.Ingest(dels, ud.AccProof) - if err != nil { - return nil, err - } - - // We have to do this in order to generate the update data for the wallet. - // TODO: get rid of this once the pollard can generate the update data. 
- s := uview.accumulator.GetStump() - updateData, err = s.Update(dels, addHashes, ud.AccProof) - if err != nil { - return nil, err - } + updateData, err := s.Update(dels, addHashes, ud.AccProof) + if err != nil { + return nil, err + } - err = uview.accumulator.Modify(adds, dels, ud.AccProof) - if err != nil { - return nil, err - } - } else { - // TODO we should be verifying here but aren't as the accumulator.Verify - // function expects a complete proof. - err = uview.accumulator.Modify(adds, dels, ud.AccProof) - if err != nil { - return nil, err - } + // Ingest and modify the accumulator. + err = uview.accumulator.Ingest(dels, ud.AccProof) + if err != nil { + return nil, err + } + err = uview.accumulator.Modify(adds, dels, ud.AccProof) + if err != nil { + return nil, err } return &updateData, nil @@ -833,17 +813,6 @@ func (uview *UtreexoViewpoint) compareRoots(compRoot []utreexo.Hash) bool { return true } -// SetProofInterval sets the interval of the utreexo proofs to be received by the node. -// Ex: interval of 10 means that you receive a utreexo proof every 10 blocks. -func (uview *UtreexoViewpoint) SetProofInterval(proofInterval int32) { - uview.proofInterval = proofInterval -} - -// GetProofInterval returns the proof interval of the current utreexo viewpoint. -func (uview *UtreexoViewpoint) GetProofInterval() int32 { - return uview.proofInterval -} - // PruneAll deletes all the cached leaves in the utreexo viewpoint, leaving only the // roots of the accumulator. func (uview *UtreexoViewpoint) PruneAll() { @@ -854,9 +823,7 @@ func (uview *UtreexoViewpoint) PruneAll() { // NewUtreexoViewpoint returns an empty UtreexoViewpoint func NewUtreexoViewpoint() *UtreexoViewpoint { return &UtreexoViewpoint{ - // Use 1 as a default value. - proofInterval: 1, - accumulator: utreexo.NewMapPollard(false), + accumulator: utreexo.NewMapPollard(false), } } @@ -864,8 +831,6 @@ func NewUtreexoViewpoint() *UtreexoViewpoint { // assumedUtreexoPoint. 
func (b *BlockChain) SetUtreexoStateFromAssumePoint() { b.utreexoView = &UtreexoViewpoint{ - // Use 1 as a default value. - proofInterval: 1, accumulator: utreexo.NewMapPollardFromRoots( b.assumeUtreexoPoint.Roots, b.assumeUtreexoPoint.NumLeaves, false), } diff --git a/btcjson/chainsvrresults.go b/btcjson/chainsvrresults.go index 53f08016..79a3d6e2 100644 --- a/btcjson/chainsvrresults.go +++ b/btcjson/chainsvrresults.go @@ -930,7 +930,6 @@ type ProveUtxoChainTipInclusionVerboseResult struct { // is returned. type GetUtreexoProofVerboseResult struct { ProofHashes []string `json:"proofhashes"` - RememberIndexes []uint32 `json:"rememberindexes"` TargetHashes []string `json:"targethashes"` TargetPreimages []string `json:"targetpreimages"` ProofTargets []uint64 `json:"prooftargets"` diff --git a/integration/utreexocompactstatenode_test.go b/integration/utreexocompactstatenode_test.go index b5dacca2..81261fd1 100644 --- a/integration/utreexocompactstatenode_test.go +++ b/integration/utreexocompactstatenode_test.go @@ -63,9 +63,8 @@ func fetchBlocks(blockhashes []*chainhash.Hash, harness *rpctest.Harness) ( accProof := utreexo.Proof{Targets: utreexoProof.ProofTargets, Proof: proofHashes} udata := wire.UData{ - AccProof: accProof, - LeafDatas: lds, - RememberIdx: utreexoProof.RememberIndexes, + AccProof: accProof, + LeafDatas: lds, } return &udata, nil diff --git a/rpcserver.go b/rpcserver.go index 14b0cea3..8ef91a3b 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -3453,7 +3453,6 @@ func handleGetUtreexoProof(s *rpcServer, cmd interface{}, closeChan <-chan struc getReply := &btcjson.GetUtreexoProofVerboseResult{ ProofHashes: proofString, - RememberIndexes: udata.RememberIdx, TargetHashes: targetHashString, TargetPreimages: targetPreimageString, ProofTargets: udata.AccProof.Targets, diff --git a/rpcserverhelp.go b/rpcserverhelp.go index 8443fd2f..6a6b2c9e 100644 --- a/rpcserverhelp.go +++ b/rpcserverhelp.go @@ -617,7 +617,6 @@ var helpDescsEnUS = map[string]string{ // 
GetUtreexoProofVerboseResult help. "getutreexoproofverboseresult-proofhashes": "One half of the utreexo accumulator proof (the other half being prooftargets).\n" + "The proof hashes for the utreexo accumulator proof of the given UTXOs.", - "getutreexoproofverboseresult-rememberindexes": "Indexes of the targets to cache when performing initial block download", "getutreexoproofverboseresult-targethashes": "Hashes that correspond to each of the prooftargets", "getutreexoproofverboseresult-targetpreimages": "Preimages of the targethashes", "getutreexoproofverboseresult-prooftargets": "One half of the utreexo accumulator proof (the other half being proofhashes).\n" + diff --git a/server.go b/server.go index 6079d69b..610308aa 100644 --- a/server.go +++ b/server.go @@ -3249,14 +3249,10 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, if cfg.FlatUtreexoProofIndex { indxLog.Info("Flat Utreexo Proof index is enabled") - // Create interval to pass to the flat utreexo proof index. - interval := new(int32) - *interval = 1 - var err error s.flatUtreexoProofIndex, err = indexers.NewFlatUtreexoProofIndex( - cfg.Prune != 0, chainParams, interval, - cfg.UtreexoProofIndexMaxMemory*1024*1024, cfg.DataDir, db.Flush) + cfg.Prune != 0, chainParams, cfg.UtreexoProofIndexMaxMemory*1024*1024, + cfg.DataDir, db.Flush) if err != nil { return nil, err } diff --git a/wire/udata.go b/wire/udata.go index eccceb2d..173cc354 100644 --- a/wire/udata.go +++ b/wire/udata.go @@ -21,9 +21,6 @@ type UData struct { // LeafDatas are the tx validation data for every input. LeafDatas []LeafData - - // All the indexes of new utxos to remember. - RememberIdx []uint32 } // StxosHashes returns the hash of all stxos in this UData. The hashes returned @@ -48,32 +45,19 @@ func (ud *UData) SerializeUtxoDataSize() int { return size } -// SerializeRememberIdxSize returns the number of bytes it would take to serialize the -// remember indexes. 
-func (ud *UData) SerializeRememberIdxSize() int { - return SerializeRemembersSize(ud.RememberIdx) -} - // SerializeSize returns the number of bytes it would take to serialize the // UData. func (ud *UData) SerializeSize() int { - // Accumulator proof size. - size := BatchProofSerializeSize(&ud.AccProof) - // Leaf data size. - size += ud.SerializeUtxoDataSize() - - // Remember indexes size. - return size + ud.SerializeRememberIdxSize() + return BatchProofSerializeSize(&ud.AccProof) + ud.SerializeUtxoDataSize() } // ----------------------------------------------------------------------------- // UData serialization includes all the data that is needed for a utreexo node to -// verify a block or a tx with only the utreexo roots. Remember indexes aren't -// necessary but are there for caching purposes. +// verify a block or a tx with only the utreexo roots. // // The serialized format is: -// [<remember indexes><accumulator proof><leaf datas>] +// [<accumulator proof><leaf datas>] // // Accumulator proof serialization follows the batchproof serialization found // in wire/batchproof.go. @@ -83,7 +67,6 @@ func (ud *UData) SerializeSize() int { // All together, the serialization looks like so: // // Field Type Size -// remember indexes []varint variable // accumulator proof []byte variable // leaf datas []byte variable // @@ -91,13 +74,8 @@ // Serialize encodes the UData to w using the UData serialization format. func (ud *UData) Serialize(w io.Writer) error { - err := SerializeRemembers(w, ud.RememberIdx) - if err != nil { - return err - } - // Write batch proof. - err = BatchProofSerialize(w, &ud.AccProof) + err := BatchProofSerialize(w, &ud.AccProof) if err != nil { returnErr := messageError("Serialize", err.Error()) return returnErr @@ -122,12 +100,6 @@ // Deserialize encodes the UData to w using the UData serialization format. 
func (ud *UData) Deserialize(r io.Reader) error { - remembers, err := DeserializeRemembers(r) - if err != nil { - return err - } - ud.RememberIdx = remembers - proof, err := BatchProofDeserialize(r) if err != nil { returnErr := messageError("Deserialize AccProof", err.Error()) return returnErr } @@ -144,8 +116,8 @@ func (ud *UData) Deserialize(r io.Reader) error { for i := range ud.LeafDatas { err = ud.LeafDatas[i].Deserialize(r) if err != nil { - str := fmt.Sprintf("rememberCount %d, targetCount:%d, Stxos[%d], err:%s\n", - len(remembers), len(ud.AccProof.Targets), i, err.Error()) + str := fmt.Sprintf("targetCount:%d, Stxos[%d], err:%s\n", + len(ud.AccProof.Targets), i, err.Error()) returnErr := messageError("Deserialize stxos", str) return returnErr } @@ -158,11 +130,10 @@ func (ud *UData) Deserialize(r io.Reader) error { // UData compact serialization includes only the data that is missing for a // utreexo node to verify a block or a tx with only the utreexo roots. The // compact serialization leaves out data that is able to be fetched locally -// by a node. Remember indexes aren't necessary but are there for caching -// purposes. +// by a node. // // The serialized format is: -// [<remember indexes><accumulator proof><leaf datas>] +// [<accumulator proof><leaf datas>] // // Accumulator proof serialization follows the batchproof serialization found // in wire/batchproof.go. @@ -172,7 +143,6 @@ func (ud *UData) Deserialize(r io.Reader) error { // All together, the serialization looks like so: // // Field Type Size -// remember indexes []varint variable // accumulator proof []byte variable // leaf datas []byte variable // @@ -201,14 +171,8 @@ // SerializeSizeCompact returns the number of bytes it would take to serialize the // UData using the compact UData serialization format. func (ud *UData) SerializeSizeCompact() int { - // Accumulator proof size. - size := BatchProofSerializeSize(&ud.AccProof) - - // Leaf data size - size += ud.SerializeUxtoDataSizeCompact() - - // Remember indexes size. 
- return size + ud.SerializeRememberIdxSize() + // Accumulator proof size + leaf data size + return BatchProofSerializeSize(&ud.AccProof) + ud.SerializeUxtoDataSizeCompact() } // SerializeCompact encodes the UData to w using the compact UData @@ -216,12 +180,7 @@ func (ud *UData) SerializeSizeCompact() int { // the exception that compact leaf data serialization is used. Everything else // remains the same. func (ud *UData) SerializeCompact(w io.Writer) error { - err := SerializeRemembers(w, ud.RememberIdx) - if err != nil { - return err - } - - err = BatchProofSerialize(w, &ud.AccProof) + err := BatchProofSerialize(w, &ud.AccProof) if err != nil { returnErr := messageError("SerializeCompact", err.Error()) return returnErr @@ -250,16 +209,9 @@ func (ud *UData) SerializeCompact(w io.Writer) error { // in as a correct txCount is critical for deserializing correctly. When // deserializing a block, txInCount does not matter. func (ud *UData) DeserializeCompact(r io.Reader) error { - remembers, err := DeserializeRemembers(r) - if err != nil { - return err - } - ud.RememberIdx = remembers - proof, err := BatchProofDeserialize(r) if err != nil { - returnErr := messageError("DeserializeCompact", err.Error()) - return returnErr + return err } ud.AccProof = *proof @@ -273,8 +225,8 @@ func (ud *UData) DeserializeCompact(r io.Reader) error { for i := range ud.LeafDatas { err = ud.LeafDatas[i].DeserializeCompact(r) if err != nil { - str := fmt.Sprintf("rememberCount %d, targetCount:%d, LeafDatas[%d], err:%s\n", - len(remembers), len(ud.AccProof.Targets), i, err.Error()) + str := fmt.Sprintf("targetCount:%d, LeafDatas[%d], err:%s\n", + len(ud.AccProof.Targets), i, err.Error()) returnErr := messageError("Deserialize leaf datas", str) return returnErr } @@ -367,54 +319,6 @@ func (ud *UData) DeserializeCompactNoAccProof(r io.Reader) error { return nil } -// SerializeRemembersSize returns how many bytes it would take to serialize -// all the remember indexes. 
-func SerializeRemembersSize(remembers []uint32) int { - size := VarIntSerializeSize(uint64(len(remembers))) - for _, remember := range remembers { - size += VarIntSerializeSize(uint64(remember)) - } - - return size -} - -// SerializeRemembers serializes the passed in remembers to the writer. -func SerializeRemembers(w io.Writer, remembers []uint32) error { - err := WriteVarInt(w, 0, uint64(len(remembers))) - if err != nil { - return err - } - - for _, remember := range remembers { - err = WriteVarInt(w, 0, uint64(remember)) - if err != nil { - return err - } - } - - return nil -} - -// DeserializeRemembers deserializes the remember indexes from the reader and -// returns the deserialized remembers. -func DeserializeRemembers(r io.Reader) ([]uint32, error) { - count, err := ReadVarInt(r, 0) - if err != nil { - return nil, err - } - - remembers := make([]uint32, count) - for i := range remembers { - remember, err := ReadVarInt(r, 0) - if err != nil { - return nil, err - } - remembers[i] = uint32(remember) - } - - return remembers, nil -} - // HashesFromLeafDatas hashes the passed in leaf datas. Returns an error if a // leaf data is compact as you can't generate the correct hash. 
func HashesFromLeafDatas(leafDatas []LeafData) ([]utreexo.Hash, error) { diff --git a/wire/udata_test.go b/wire/udata_test.go index c5a60326..3fa4821d 100644 --- a/wire/udata_test.go +++ b/wire/udata_test.go @@ -21,7 +21,6 @@ type testData struct { name string height int32 leavesPerBlock []LeafData - rememberIdx []uint32 size int sizeCompact int @@ -59,9 +58,8 @@ var mainNetBlock104773 = testData{ IsCoinBase: false, }, }, - rememberIdx: []uint32{2, 3}, - size: 220, - sizeCompact: 86, + size: 217, + sizeCompact: 83, sizeCompactNoAcc: 82, } @@ -114,9 +112,8 @@ var testNetBlock383 = testData{ IsCoinBase: true, }, }, - rememberIdx: []uint32{1, 2, 3}, - size: 465, - sizeCompact: 197, + size: 461, + sizeCompact: 193, sizeCompactNoAcc: 192, } @@ -183,13 +180,6 @@ func checkUDEqual(ud, checkUData *UData, isCompact bool, name string) error { } } - for i := range ud.RememberIdx { - if ud.RememberIdx[i] != checkUData.RememberIdx[i] { - return fmt.Errorf("%s: UData RememberIdx mismatch. expect %v, got %v", - name, ud.RememberIdx[i], checkUData.RememberIdx[i]) - } - } - if !isCompact { if !reflect.DeepEqual(ud, checkUData) { if !reflect.DeepEqual(ud.AccProof, checkUData.AccProof) { @@ -199,9 +189,6 @@ func checkUDEqual(ud, checkUData *UData, isCompact bool, name string) error { if !reflect.DeepEqual(ud.LeafDatas, checkUData.LeafDatas) { return fmt.Errorf("ud and checkUData reflect.DeepEqual LeafDatas mismatch") } - if !reflect.DeepEqual(ud.RememberIdx, checkUData.RememberIdx) { - return fmt.Errorf("ud and checkUData reflect.DeepEqual TxoTTLs mismatch") - } } } @@ -247,8 +234,6 @@ func TestUDataSerializeSize(t *testing.T) { t.Fatal(err) } - ud.RememberIdx = testData.rememberIdx - // Append to the tests. tests = append(tests, test{ name: testData.name, @@ -340,8 +325,6 @@ func TestUDataSerializeSize(t *testing.T) { // Test that SerializeUxtoDataSizeCompact and SerializeUxtoDataSizeCompact // sums up to the entire thing. 
totals := test.ud.SerializeUxtoDataSizeCompact() + test.ud.SerializeAccSizeCompact() - totals += test.ud.SerializeRememberIdxSize() - if totals != test.ud.SerializeSizeCompact() { t.Errorf("%s: expected %d for but got %d as the sum of utxodata, accumulator data, and the remember idxs", test.name, test.ud.SerializeSizeCompact(), totals) @@ -385,8 +368,6 @@ func TestUDataSerialize(t *testing.T) { t.Fatal(err) } - ud.RememberIdx = testData.rememberIdx - // Append to the tests. tests = append(tests, test{name: testData.name, ud: *ud}) }