diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 31e478117cd54..ad75002ede621 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -86,6 +86,9 @@ type layer interface {
// This is meant to be used during shutdown to persist the layer without
// flattening everything down (bad for reorgs).
journal(w io.Writer) error
+
+ // isStale returns whether this layer has become stale or if it's still live.
+ isStale() bool
}
// Config contains the settings for database.
diff --git a/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go
index 6b87883482c97..87cf777ead697 100644
--- a/triedb/pathdb/difflayer.go
+++ b/triedb/pathdb/difflayer.go
@@ -41,7 +41,8 @@ type diffLayer struct {
memory uint64 // Approximate guess as to how much memory we use
parent layer // Parent layer modified by this one, never nil, **can be changed**
- lock sync.RWMutex // Lock used to protect parent
+ stale bool // Signals that the layer became stale (referenced disk layer became stale)
+ lock sync.RWMutex // Lock used to protect parent and stale fields
}
// newDiffLayer creates a new diff layer on top of an existing layer.
@@ -95,6 +96,25 @@ func (dl *diffLayer) parentLayer() layer {
return dl.parent
}
+// isStale returns whether this layer has become stale or if it's still live.
+func (dl *diffLayer) isStale() bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.stale
+}
+
+// markStale sets the stale flag as true.
+func (dl *diffLayer) markStale() {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ if dl.stale {
+ panic("triedb diff layer is stale")
+ }
+ dl.stale = true
+}
+
// node implements the layer interface, retrieving the trie node blob with the
// provided node information. No error will be returned if the node is not found.
func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error) {
@@ -103,6 +123,9 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
dl.lock.RLock()
defer dl.lock.RUnlock()
+ if dl.stale {
+ return nil, common.Hash{}, nil, errSnapshotStale
+ }
// If the trie node is known locally, return it
subset, ok := dl.nodes[owner]
if ok {
@@ -125,7 +148,7 @@ func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map
}
// persist flushes the diff layer and all its parent layers to disk layer.
-func (dl *diffLayer) persist(force bool) (layer, error) {
+func (dl *diffLayer) persist(force bool) (*diskLayer, error) {
if parent, ok := dl.parentLayer().(*diffLayer); ok {
// Hold the lock to prevent any read operation until the new
// parent is linked correctly.
@@ -147,7 +170,7 @@ func (dl *diffLayer) persist(force bool) (layer, error) {
// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
-func diffToDisk(layer *diffLayer, force bool) (layer, error) {
+func diffToDisk(layer *diffLayer, force bool) (*diskLayer, error) {
disk, ok := layer.parentLayer().(*diskLayer)
if !ok {
panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer()))
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index eadcfacef76dd..e402b68c663c4 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -73,7 +73,7 @@ func (dl *diskLayer) parentLayer() layer {
return nil
}
-// isStale return whether this layer has become stale (was flattened across) or if
+// isStale returns whether this layer has become stale (was flattened across) or if
// it's still live.
func (dl *diskLayer) isStale() bool {
dl.lock.RLock()
diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go
index d314779910e9a..56462f19a3b52 100644
--- a/triedb/pathdb/layertree.go
+++ b/triedb/pathdb/layertree.go
@@ -33,8 +33,11 @@ import (
// thread-safe to use. However, callers need to ensure the thread-safety
// of the referenced layer by themselves.
type layerTree struct {
- lock sync.RWMutex
- layers map[common.Hash]layer
+ lock sync.RWMutex
+ base *diskLayer
+ layers map[common.Hash]layer
+ descendants map[common.Hash]map[common.Hash]struct{}
+ lookup *lookup
}
// newLayerTree constructs the layerTree with the given head layer.
@@ -45,17 +48,56 @@ func newLayerTree(head layer) *layerTree {
}
// reset initializes the layerTree by the given head layer.
-// All the ancestors will be iterated out and linked in the tree.
func (tree *layerTree) reset(head layer) {
tree.lock.Lock()
defer tree.lock.Unlock()
- var layers = make(map[common.Hash]layer)
- for head != nil {
- layers[head.rootHash()] = head
- head = head.parentLayer()
+ var (
+ current = head
+ layers = make(map[common.Hash]layer)
+ descendants = make(map[common.Hash]map[common.Hash]struct{})
+ )
+ for {
+ hash := current.rootHash()
+ layers[hash] = current
+
+ // Traverse the ancestors (diff only) of the current layer and link them
+ for h := range diffAncestors(current) {
+ subset := descendants[h]
+ if subset == nil {
+ subset = make(map[common.Hash]struct{})
+ descendants[h] = subset
+ }
+ subset[hash] = struct{}{}
+ }
+ parent := current.parentLayer()
+ if parent == nil {
+ break
+ }
+ current = parent
}
+ tree.base = current.(*diskLayer) // panic if it's a diff layer
tree.layers = layers
+ tree.descendants = descendants
+ tree.lookup = newLookup(head, tree.isDescendant)
+}
+
+// diffAncestors returns all the diff-layer ancestors of the given layer (the
+// disk layer is not included).
+func diffAncestors(layer layer) map[common.Hash]struct{} {
+ set := make(map[common.Hash]struct{})
+ for {
+ parent := layer.parentLayer()
+ if parent == nil {
+ break
+ }
+ if _, ok := parent.(*diskLayer); ok {
+ break
+ }
+ set[parent.rootHash()] = struct{}{}
+ layer = parent
+ }
+ return set
}
// get retrieves a layer belonging to the given state root.
@@ -66,6 +108,17 @@ func (tree *layerTree) get(root common.Hash) layer {
return tree.layers[types.TrieRootHash(root)]
}
+// isDescendant returns whether the layer with the given root is a
+// descendant of the specified ancestor.
+func (tree *layerTree) isDescendant(root common.Hash, ancestor common.Hash) bool {
+ subset := tree.descendants[ancestor]
+ if subset == nil {
+ return false
+ }
+ _, ok := subset[root]
+ return ok
+}
+
// forEach iterates the stored layers inside and applies the
// given callback on them.
func (tree *layerTree) forEach(onLayer func(layer)) {
@@ -104,8 +157,20 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
tree.lock.Lock()
+ defer tree.lock.Unlock()
+
tree.layers[l.rootHash()] = l
- tree.lock.Unlock()
+
+ // Link the new layer into the descendants set
+ for h := range diffAncestors(l) {
+ subset := tree.descendants[h]
+ if subset == nil {
+ subset = make(map[common.Hash]struct{})
+ tree.descendants[h] = subset
+ }
+ subset[l.rootHash()] = struct{}{}
+ }
+ tree.lookup.addLayer(l)
return nil
}
@@ -131,8 +196,19 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
if err != nil {
return err
}
- // Replace the entire layer tree with the flat base
+ tree.base = base
+
+ // Mark all the diff layers as stale. Note that the original disk layer
+ // has already been marked as stale previously.
+ for _, l := range tree.layers {
+ if dl, ok := l.(*diffLayer); ok {
+ dl.markStale()
+ }
+ }
+ // Reset the layer tree with the single new disk layer
tree.layers = map[common.Hash]layer{base.rootHash(): base}
+ tree.descendants = make(map[common.Hash]map[common.Hash]struct{})
+ tree.lookup = newLookup(base, tree.isDescendant)
return nil
}
// Dive until we run out of layers or reach the persistent database
@@ -147,6 +223,11 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
}
// We're out of layers, flatten anything below, stopping if it's the disk or if
// the memory limit is not yet exceeded.
+ var (
+ err error
+ replaced layer
+ newBase *diskLayer
+ )
switch parent := diff.parentLayer().(type) {
case *diskLayer:
return nil
@@ -156,14 +237,24 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
// parent is linked correctly.
diff.lock.Lock()
- base, err := parent.persist(false)
+ // Hold a reference to the original layer being replaced
+ replaced = parent
+
+ // Replace the original parent layer with the new disk layer. The procedure
+ // can be illustrated as below:
+ //
+ // Before change: base <- C1 <- C2 <- C3 (diff)
+ // After change: base(stale) <- C1 (C2 is replaced by newBase)
+ // newBase <- C3 (diff)
+ newBase, err = parent.persist(false)
if err != nil {
diff.lock.Unlock()
return err
}
- tree.layers[base.rootHash()] = base
- diff.parent = base
+ tree.layers[newBase.rootHash()] = newBase
+ // Link the new parent and release the lock
+ diff.parent = newBase
diff.lock.Unlock()
default:
@@ -177,19 +268,31 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
children[parent] = append(children[parent], root)
}
}
+ // clearDiff removes the indexes of the given layer if it's a
+ // diff layer (the disk layer has no index).
+ clearDiff := func(layer layer) {
+ diff, ok := layer.(*diffLayer)
+ if !ok {
+ return
+ }
+ diff.markStale()
+ delete(tree.descendants, diff.rootHash())
+ tree.lookup.removeLayer(diff)
+ }
var remove func(root common.Hash)
remove = func(root common.Hash) {
+ clearDiff(tree.layers[root])
+
+ // Unlink the layer from the layer tree and cascade to its children
delete(tree.layers, root)
for _, child := range children[root] {
remove(child)
}
delete(children, root)
}
- for root, layer := range tree.layers {
- if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
- remove(root)
- }
- }
+ remove(tree.base.rootHash()) // remove the old/stale disk layer
+ clearDiff(replaced) // remove the lookup data of the stale parent being replaced
+ tree.base = newBase // update the base layer with the newly constructed one
return nil
}
@@ -198,17 +301,18 @@ func (tree *layerTree) bottom() *diskLayer {
tree.lock.RLock()
defer tree.lock.RUnlock()
- if len(tree.layers) == 0 {
- return nil // Shouldn't happen, empty tree
- }
- // pick a random one as the entry point
- var current layer
- for _, layer := range tree.layers {
- current = layer
- break
- }
- for current.parentLayer() != nil {
- current = current.parentLayer()
+ return tree.base
+}
+
+// lookupNode returns the layer that is confirmed to contain the node being
+// searched for.
+func (tree *layerTree) lookupNode(accountHash common.Hash, path []byte, state common.Hash) layer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ tip := tree.lookup.nodeTip(accountHash, path, state)
+ if tip == (common.Hash{}) {
+ return tree.base
}
- return current.(*diskLayer)
+ return tree.layers[tip]
}
diff --git a/triedb/pathdb/layertree_test.go b/triedb/pathdb/layertree_test.go
new file mode 100644
index 0000000000000..9e0401f15450a
--- /dev/null
+++ b/triedb/pathdb/layertree_test.go
@@ -0,0 +1,728 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package pathdb
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+func newTestLayerTree() *layerTree {
+ db := New(rawdb.NewMemoryDatabase(), nil, false)
+ l := newDiskLayer(common.Hash{0x1}, 0, db, nil, newNodeBuffer(0, nil, 0))
+ t := newLayerTree(l)
+ return t
+}
+
+func TestLayerCap(t *testing.T) {
+ var cases = []struct {
+ init func() *layerTree
+ head common.Hash
+ layers int
+ base common.Hash
+ snapshot map[common.Hash]struct{}
+ }{
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ head: common.Hash{0x4},
+ layers: 2,
+ base: common.Hash{0x2},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x2}: {},
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ head: common.Hash{0x4},
+ layers: 1,
+ base: common.Hash{0x3},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C4 (HEAD)
+ head: common.Hash{0x4},
+ layers: 0,
+ base: common.Hash{0x4},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x4}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ head: common.Hash{0x4a},
+ layers: 2,
+ base: common.Hash{0x2a},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x4a}: {},
+ common.Hash{0x3a}: {},
+ common.Hash{0x2a}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ head: common.Hash{0x4a},
+ layers: 1,
+ base: common.Hash{0x3a},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x4a}: {},
+ common.Hash{0x3a}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ head: common.Hash{0x4a},
+ layers: 2,
+ base: common.Hash{0x2},
+ snapshot: map[common.Hash]struct{}{
+ common.Hash{0x4a}: {},
+ common.Hash{0x3a}: {},
+ common.Hash{0x4b}: {},
+ common.Hash{0x3b}: {},
+ common.Hash{0x2}: {},
+ },
+ },
+ }
+ for _, c := range cases {
+ tr := c.init()
+ if err := tr.cap(c.head, c.layers); err != nil {
+ t.Fatalf("Failed to cap the layer tree %v", err)
+ }
+ if tr.bottom().root != c.base {
+ t.Fatalf("Unexpected bottom layer tree root, want %v, got %v", c.base, tr.bottom().root)
+ }
+ if len(c.snapshot) != len(tr.layers) {
+ t.Fatalf("Unexpected layer tree size, want %v, got %v", len(c.snapshot), len(tr.layers))
+ }
+ for h := range tr.layers {
+ if _, ok := c.snapshot[h]; !ok {
+ t.Fatalf("Unexpected layer %v", h)
+ }
+ }
+ }
+}
+
+func TestBaseLayer(t *testing.T) {
+ tr := newTestLayerTree()
+
+ var cases = []struct {
+ op func()
+ base common.Hash
+ }{
+ // no operation
+ {
+ func() {},
+ common.Hash{0x1},
+ },
+ // add layers on top
+ {
+ func() {
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ },
+ common.Hash{0x1},
+ },
+ // forcibly flush all the layers
+ {
+ func() {
+ tr.cap(common.Hash{0x3}, 0)
+ },
+ common.Hash{0x3},
+ },
+ // add layers on top and cap
+ {
+ func() {
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x5}, common.Hash{0x4}, 4, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x6}, common.Hash{0x5}, 5, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.cap(common.Hash{0x6}, 2)
+ },
+ common.Hash{0x4},
+ },
+ }
+ for _, c := range cases {
+ c.op()
+ if tr.base.rootHash() != c.base {
+ t.Fatalf("Unexpected base root, want %v, got: %v", c.base, tr.base.rootHash())
+ }
+ }
+}
+
+func TestDescendant(t *testing.T) {
+ var cases = []struct {
+ init func() *layerTree
+ snapshotA map[common.Hash]map[common.Hash]struct{}
+ op func(tr *layerTree)
+ snapshotB map[common.Hash]map[common.Hash]struct{}
+ }{
+ {
+ // Chain:
+ // C1->C2 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{},
+ // Chain:
+ // C1->C2->C3 (HEAD)
+ op: func(tr *layerTree) {
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2}: {
+ common.Hash{0x3}: {},
+ },
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2}: {
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ common.Hash{0x3}: {
+ common.Hash{0x4}: {},
+ },
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 2)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x3}: {
+ common.Hash{0x4}: {},
+ },
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2}: {
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ common.Hash{0x3}: {
+ common.Hash{0x4}: {},
+ },
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 1)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{},
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2}: {
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ common.Hash{0x3}: {
+ common.Hash{0x4}: {},
+ },
+ },
+ // Chain:
+ // C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 0)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{},
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2a}: {
+ common.Hash{0x3a}: {},
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x3a}: {
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x2b}: {
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ common.Hash{0x3b}: {
+ common.Hash{0x4b}: {},
+ },
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 2)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x3a}: {
+ common.Hash{0x4a}: {},
+ },
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2a}: {
+ common.Hash{0x3a}: {},
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x3a}: {
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x2b}: {
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ common.Hash{0x3b}: {
+ common.Hash{0x4b}: {},
+ },
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 1)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{},
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ snapshotA: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x2}: {
+ common.Hash{0x3a}: {},
+ common.Hash{0x4a}: {},
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ common.Hash{0x3a}: {
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x3b}: {
+ common.Hash{0x4b}: {},
+ },
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 2)
+ },
+ snapshotB: map[common.Hash]map[common.Hash]struct{}{
+ common.Hash{0x3a}: {
+ common.Hash{0x4a}: {},
+ },
+ common.Hash{0x3b}: {
+ common.Hash{0x4b}: {},
+ },
+ },
+ },
+ }
+ check := func(setA, setB map[common.Hash]map[common.Hash]struct{}) bool {
+ if len(setA) != len(setB) {
+ return false
+ }
+ for h, subA := range setA {
+ subB, ok := setB[h]
+ if !ok {
+ return false
+ }
+ if len(subA) != len(subB) {
+ return false
+ }
+ for hh := range subA {
+ if _, ok := subB[hh]; !ok {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ for _, c := range cases {
+ tr := c.init()
+ if !check(c.snapshotA, tr.descendants) {
+ t.Fatalf("Unexpected descendants")
+ }
+ c.op(tr)
+ if !check(c.snapshotB, tr.descendants) {
+ t.Fatalf("Unexpected descendants")
+ }
+ }
+}
+
+func TestStale(t *testing.T) {
+ var cases = []struct {
+ init func() *layerTree
+ op func(tr *layerTree)
+ stale map[common.Hash]struct{}
+ live map[common.Hash]struct{}
+ }{
+ {
+ // Chain:
+ // C1->C2 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C1->C2->C3 (HEAD)
+ op: func(tr *layerTree) {
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ },
+ stale: map[common.Hash]struct{}{},
+ live: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2}: {},
+ common.Hash{0x3}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 2)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2}: {}, // old diff layer
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x2}: {}, // new disk layer
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 1)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2}: {},
+ common.Hash{0x3}: {}, // old diff
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x3}: {}, // new disk
+ common.Hash{0x4}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4}, 0)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2}: {},
+ common.Hash{0x3}: {},
+ common.Hash{0x4}: {}, // old diff
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x4}: {}, // new disk
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 2)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2a}: {}, // old diff
+ common.Hash{0x2b}: {},
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x2a}: {}, // new disk
+ common.Hash{0x3a}: {},
+ common.Hash{0x4a}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C2'->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2a}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2a}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x2b}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2b}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C3->C4 (HEAD)
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 1)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2a}: {},
+ common.Hash{0x2b}: {},
+ common.Hash{0x3a}: {}, // old diff
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x3a}: {}, // new disk
+ common.Hash{0x4a}: {},
+ },
+ },
+ {
+ // Chain:
+ // C1->C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ init: func() *layerTree {
+ tr := newTestLayerTree()
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3a}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4a}, common.Hash{0x3a}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x3b}, common.Hash{0x2}, 2, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ tr.add(common.Hash{0x4b}, common.Hash{0x3b}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+ return tr
+ },
+ // Chain:
+ // C2->C3->C4 (HEAD)
+ // ->C3'->C4'
+ op: func(tr *layerTree) {
+ tr.cap(common.Hash{0x4a}, 2)
+ },
+ stale: map[common.Hash]struct{}{
+ common.Hash{0x1}: {},
+ common.Hash{0x2}: {}, // old diff
+ },
+ live: map[common.Hash]struct{}{
+ common.Hash{0x2}: {}, // new disk
+ common.Hash{0x3a}: {},
+ common.Hash{0x4a}: {},
+ common.Hash{0x3b}: {},
+ common.Hash{0x4b}: {},
+ },
+ },
+ }
+ for _, c := range cases {
+ tr := c.init()
+ var stale []layer
+ for h := range c.stale {
+ stale = append(stale, tr.get(h))
+ }
+ c.op(tr)
+
+ for _, l := range stale {
+ if !l.isStale() {
+ t.Fatalf("the layer is expected to be stale, %x", l.rootHash())
+ }
+ }
+ for h := range c.live {
+ l := tr.get(h)
+ if l == nil {
+ t.Fatalf("the layer is not reachable, %x", h)
+ }
+ if l.isStale() {
+ t.Fatalf("the layer is expected to be non-stale, %x", l.rootHash())
+ }
+ }
+ }
+}
diff --git a/triedb/pathdb/lookup.go b/triedb/pathdb/lookup.go
new file mode 100644
index 0000000000000..d90b749659378
--- /dev/null
+++ b/triedb/pathdb/lookup.go
@@ -0,0 +1,174 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "golang.org/x/sync/errgroup"
+)
+
+// lookup is an internal helper structure to quickly identify the layer that contains the trie node being searched for.
+type lookup struct {
+ nodes map[common.Hash]map[string][]common.Hash
+ descendant func(state common.Hash, ancestor common.Hash) bool
+}
+
+// newLookup initializes the lookup structure.
+func newLookup(head layer, descendant func(state common.Hash, ancestor common.Hash) bool) *lookup {
+ var (
+ current = head
+ layers []layer
+ )
+ for current != nil {
+ layers = append(layers, current)
+ current = current.parentLayer()
+ }
+ l := new(lookup)
+ l.nodes = make(map[common.Hash]map[string][]common.Hash)
+ l.descendant = descendant
+
+ // Apply the layers from bottom to top
+ for i := len(layers) - 1; i >= 0; i-- {
+ switch diff := layers[i].(type) {
+ case *diskLayer:
+ continue
+ case *diffLayer:
+ l.addLayer(diff)
+ }
+ }
+ return l
+}
+
+// nodeTip returns the root of the most recent layer that rewrote the node and
+// is either the specified head itself or an ancestor of it (i.e. the head is a
+// descendant of that layer). If no entry qualifies, an empty hash is returned.
+func (l *lookup) nodeTip(owner common.Hash, path []byte, head common.Hash) common.Hash {
+ subset, exists := l.nodes[owner]
+ if !exists {
+ return common.Hash{}
+ }
+ list := subset[string(path)]
+
+ // Traverse the list in reverse order (newest first) and return the first
+ // entry that matches the specified head or is an ancestor of it.
+ for i := len(list) - 1; i >= 0; i-- {
+ if list[i] == head || l.descendant(head, list[i]) {
+ return list[i]
+ }
+ }
+ return common.Hash{}
+}
+
+// addLayer traverses all the dirty nodes within the given diff layer and links
+// them into the lookup set.
+func (l *lookup) addLayer(diff *diffLayer) {
+ defer func(now time.Time) {
+ lookupAddLayerTimer.UpdateSince(now)
+ }(time.Now())
+
+ var (
+ state = diff.rootHash()
+ lock sync.Mutex
+ workers errgroup.Group
+ )
+ workers.SetLimit(runtime.NumCPU() / 2)
+
+ for accountHash, nodes := range diff.nodes {
+ accountHash, nodes := accountHash, nodes // closure
+
+ workers.Go(func() error {
+ lock.Lock()
+ subset := l.nodes[accountHash]
+ if subset == nil {
+ subset = make(map[string][]common.Hash)
+ l.nodes[accountHash] = subset
+ }
+ lock.Unlock()
+
+ // Put the layer hash at the end of the list
+ for path := range nodes {
+ subset[path] = append(subset[path], state)
+ }
+ return nil
+ })
+ }
+ workers.Wait()
+}
+
+// removeLayer traverses all the dirty nodes within the given diff layer and
+// unlinks them from the lookup set.
+func (l *lookup) removeLayer(diff *diffLayer) error {
+ defer func(now time.Time) {
+ lookupRemoveLayerTimer.UpdateSince(now)
+ }(time.Now())
+
+ var (
+ state = diff.rootHash()
+ lock sync.RWMutex
+ workers errgroup.Group
+ )
+ workers.SetLimit(runtime.NumCPU() / 2)
+
+ for accountHash, nodes := range diff.nodes {
+ accountHash, nodes := accountHash, nodes // closure
+
+ workers.Go(func() error {
+ lock.RLock()
+ subset := l.nodes[accountHash]
+ if subset == nil {
+ lock.RUnlock()
+ return fmt.Errorf("unknown node owner %x", accountHash)
+ }
+ lock.RUnlock()
+
+ // Traverse the list from oldest to newest to quickly locate the entry
+ // of the stale layer (which is normally the oldest one).
+ for path := range nodes {
+ var found bool
+ for j := 0; j < len(subset[path]); j++ {
+ if subset[path][j] == state {
+ if j == 0 {
+ subset[path] = subset[path][1:] // TODO what if the underlying slice is held forever?
+ } else {
+ subset[path] = append(subset[path][:j], subset[path][j+1:]...)
+ }
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("failed to delete lookup %x %v", accountHash, []byte(path))
+ }
+ if len(subset[path]) == 0 {
+ delete(subset, path)
+ }
+ }
+ if len(subset) == 0 {
+ lock.Lock()
+ delete(l.nodes, accountHash)
+ lock.Unlock()
+ }
+ return nil
+ })
+ }
+ return workers.Wait()
+}
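
The lookup index above keeps, for each (owner, path) pair, the roots of the diff layers that rewrote that node, ordered from oldest to newest; addLayer appends to the list and nodeTip scans it backwards. A minimal standalone sketch of that selection rule (hypothetical string roots and a toy descendant predicate, not the types used in this diff):

package main

import "fmt"

// tip mirrors lookup.nodeTip: given the per-(owner, path) list of layer roots,
// return the newest entry that is the head itself or an ancestor of the head.
func tip(list []string, head string, isDescendant func(state, ancestor string) bool) string {
	for i := len(list) - 1; i >= 0; i-- {
		if list[i] == head || isDescendant(head, list[i]) {
			return list[i]
		}
	}
	return "" // nothing qualifies; the caller falls back to the disk layer
}

func main() {
	// C4 descends from C3, which descends from C2; C2 and C3 both rewrote the node.
	ancestors := map[string][]string{"C3": {"C2"}, "C4": {"C2", "C3"}}
	isDescendant := func(state, ancestor string) bool {
		for _, a := range ancestors[state] {
			if a == ancestor {
				return true
			}
		}
		return false
	}
	fmt.Println(tip([]string{"C2", "C3"}, "C4", isDescendant)) // C3: newest layer visible from C4
	fmt.Println(tip([]string{"C2", "C3"}, "C2", isDescendant)) // C2: C3 is not visible from C2
	fmt.Println(tip([]string{"C3"}, "C2", isDescendant))       // "": fall back to the disk layer
}
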
diff --git a/triedb/pathdb/lookup_test.go b/triedb/pathdb/lookup_test.go
new file mode 100644
index 0000000000000..21274d9c17493
--- /dev/null
+++ b/triedb/pathdb/lookup_test.go
@@ -0,0 +1,153 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/testrand"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+func makeTestNode(owners []common.Hash, paths [][][]byte) *trienode.MergedNodeSet {
+ merged := trienode.NewMergedNodeSet()
+ for i, owner := range owners {
+ set := trienode.NewNodeSet(owner)
+ for _, path := range paths[i] {
+ blob := testrand.Bytes(32)
+ set.AddNode(path, &trienode.Node{
+ Blob: blob,
+ Hash: crypto.Keccak256Hash(blob),
+ })
+ }
+ merged.Merge(set)
+ }
+ return merged
+}
+
+func TestNodeLookup(t *testing.T) {
+ tr := newTestLayerTree() // base = 0x1
+
+ tr.add(common.Hash{0x2}, common.Hash{0x1}, 1, makeTestNode(
+ []common.Hash{
+ {0xa}, {0xb},
+ },
+ [][][]byte{
+ {
+ {0x1}, {0x2},
+ },
+ {
+ {0x3},
+ },
+ },
+ ), triestate.New(nil, nil))
+
+ tr.add(common.Hash{0x3}, common.Hash{0x2}, 2, makeTestNode(
+ []common.Hash{
+ {0xa}, {0xc},
+ },
+ [][][]byte{
+ {
+ {0x1}, {0x3},
+ },
+ {
+ {0x4},
+ },
+ },
+ ), triestate.New(nil, nil))
+
+ tr.add(common.Hash{0x4}, common.Hash{0x3}, 3, trienode.NewMergedNodeSet(), triestate.New(nil, nil))
+
+ var cases = []struct {
+ account common.Hash
+ path []byte
+ state common.Hash
+ expect common.Hash
+ }{
+ {
+ // unknown owner
+ common.Hash{0xd}, nil, common.Hash{0x4}, common.Hash{0x1},
+ },
+ {
+ // unknown path
+ common.Hash{0xa}, []byte{0x4}, common.Hash{0x4}, common.Hash{0x1},
+ },
+ /*
+ lookup node from the tip
+ */
+ {
+ common.Hash{0xa}, []byte{0x1}, common.Hash{0x4}, common.Hash{0x3},
+ },
+ {
+ common.Hash{0xa}, []byte{0x2}, common.Hash{0x4}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xa}, []byte{0x3}, common.Hash{0x4}, common.Hash{0x3},
+ },
+ {
+ common.Hash{0xb}, []byte{0x3}, common.Hash{0x4}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xc}, []byte{0x4}, common.Hash{0x4}, common.Hash{0x3},
+ },
+ /*
+ lookup node from the middle
+ */
+ {
+ common.Hash{0xa}, []byte{0x1}, common.Hash{0x3}, common.Hash{0x3},
+ },
+ {
+ common.Hash{0xa}, []byte{0x2}, common.Hash{0x3}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xa}, []byte{0x3}, common.Hash{0x3}, common.Hash{0x3},
+ },
+ {
+ common.Hash{0xb}, []byte{0x3}, common.Hash{0x3}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xc}, []byte{0x4}, common.Hash{0x3}, common.Hash{0x3},
+ },
+ /*
+ lookup node from the bottom
+ */
+ {
+ common.Hash{0xa}, []byte{0x1}, common.Hash{0x2}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xa}, []byte{0x2}, common.Hash{0x2}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xa}, []byte{0x3}, common.Hash{0x2}, common.Hash{0x1},
+ },
+ {
+ common.Hash{0xb}, []byte{0x3}, common.Hash{0x2}, common.Hash{0x2},
+ },
+ {
+ common.Hash{0xc}, []byte{0x4}, common.Hash{0x2}, common.Hash{0x1},
+ },
+ }
+ for i, c := range cases {
+ l := tr.lookupNode(c.account, c.path, c.state)
+ if l.rootHash() != c.expect {
+ t.Errorf("Unexpected tiphash, %d, want: %x, got: %x", i, c.expect, l.rootHash())
+ }
+ }
+}
diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go
index a250f703cbab3..c1f75dbfb03ea 100644
--- a/triedb/pathdb/metrics.go
+++ b/triedb/pathdb/metrics.go
@@ -45,7 +45,10 @@ var (
diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
- historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
+ historyBuildTimeMeter = metrics.NewRegisteredResettingTimer("pathdb/history/time", nil)
historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
+
+ lookupAddLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/add/time", nil)
+ lookupRemoveLayerTimer = metrics.NewRegisteredResettingTimer("pathdb/lookup/remove/time", nil)
)
diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go
index 6a58493ba694e..bed9d3e76010a 100644
--- a/triedb/pathdb/reader.go
+++ b/triedb/pathdb/reader.go
@@ -17,6 +17,7 @@
package pathdb
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
@@ -48,15 +49,27 @@ func (loc *nodeLoc) string() string {
// reader implements the database.Reader interface, providing the functionalities to
// retrieve trie nodes by wrapping the internal state layer.
type reader struct {
- layer layer
+ db *Database
+ state common.Hash
noHashCheck bool
+ layer layer
}
// Node implements database.Reader interface, retrieving the node with specified
// node info. Don't modify the returned byte slice since it's not deep-copied
// and still be referenced by database.
func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
- blob, got, loc, err := r.layer.node(owner, path, 0)
+ l := r.db.tree.lookupNode(owner, path, r.state)
+ if l == nil {
+ return nil, errors.New("node is not found")
+ }
+ // Check staleness after querying the lookup set. Otherwise, there is a
+ // theoretical possibility that the layer could be marked as stale after
+ // the initial staleness check.
+ if r.layer.isStale() {
+ return nil, errSnapshotStale
+ }
+ blob, got, loc, err := l.node(owner, path, 0)
if err != nil {
return nil, err
}
@@ -90,5 +103,10 @@ func (db *Database) Reader(root common.Hash) (database.Reader, error) {
if layer == nil {
return nil, fmt.Errorf("state %#x is not available", root)
}
- return &reader{layer: layer, noHashCheck: db.isVerkle}, nil
+ return &reader{
+ db: db,
+ state: root,
+ noHashCheck: db.isVerkle,
+ layer: layer,
+ }, nil
}